Compare commits


602 Commits

Author SHA1 Message Date
Elliott Hughes
a8d06766c7 Merge "[MIPS] __dso_handle.S and __dso_handle_so.S not needed." 2013-07-24 01:05:18 +00:00
Rom Lemarchand
061246b600 Merge "Restore dlmalloc mmap threshold to 64k" 2013-07-23 20:49:46 +00:00
Rom Lemarchand
d0f2a6014c Restore dlmalloc mmap threshold to 64k
Restoring DEFAULT_MMAP_THRESHOLD to 64k, the way it was before
999089181e.

This forces allocations in the 64k-256k range to be mmapped.

Change-Id: Iace55ed638edd272b3e94fa6cd2ddd349042be84
Signed-off-by: Rom Lemarchand <romlem@google.com>
2013-07-23 13:48:39 -07:00
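As a rough illustration of what that threshold controls (a hedged sketch of the idea, not dlmalloc's actual code), an allocator compares the request size against the threshold and services large requests with a dedicated mmap:

    #include <stddef.h>

    #define DEFAULT_MMAP_THRESHOLD (64 * 1024)  /* 64k, as restored above */

    /* Sketch: requests at or above the threshold get their own mmap
       region instead of being carved out of the heap. */
    static int should_mmap(size_t request_size) {
        return request_size >= DEFAULT_MMAP_THRESHOLD;
    }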
Pete Delaney
0995a7b322 [MIPS] __dso_handle.S and __dso_handle_so.S not needed.
Global hidden variable __dso_handle is now declared in:
       bionic/libc/private/__dso_handle.h

Change-Id: I8e951a8d7c65877bafc1be23a7fff6d44d3a2846
Signed-off-by: Pete Delaney  <piet.delaney@imgtec.com>
Signed-off-by: Chao-Ying Fu  <chao-ying.fu@imgtec.com>
2013-07-22 23:16:02 -07:00
Elliott Hughes
6fe4a58f84 Merge "Move stuff only needed by pthread-timers.c into pthread-timers.c." 2013-07-20 00:10:32 +00:00
Elliott Hughes
4cf1395217 Move stuff only needed by pthread-timers.c into pthread-timers.c.
Change-Id: I4915b3fff9c4f5a36b4f51027fb22019c11607b0
2013-07-19 16:42:27 -07:00
Elliott Hughes
e8bd8c2ed9 Merge "Upgrade mktemp.c to the current upstream version." 2013-07-19 23:35:29 +00:00
Elliott Hughes
284f788032 Upgrade mktemp.c to the current upstream version.
Yet another archaic relic containing bugs that had been fixed years before the
Android project even started...

Bug: 9935113
Change-Id: I3c9d019a216efd609ee568cf8c70bc360f357403
2013-07-19 15:20:31 -07:00
Elliott Hughes
f8a66bb312 Merge "Remove some now-unused makefile generality." 2013-07-18 00:39:35 +00:00
Elliott Hughes
4c6b925bac Remove some now-unused makefile generality.
MIPS uses .c files like ARM and x86 now.

Change-Id: Ie580647e385121c380316c443ec199dabf657ff8
2013-07-17 17:38:45 -07:00
Elliott Hughes
8fa9081f48 Merge "[MIPS] Rewrite MIPS crtbegin* as C files." 2013-07-18 00:34:22 +00:00
Pete Delaney
368860124c [MIPS] Rewrite MIPS crtbegin* as C files.
This updates the MIPS arch to be much more in
sync with the commit Nick Kralevich made last
June; see 9d40326830.

    Rewrite
     crtbegin.S        -> crtbegin.c
     crtbegin_so.S     -> crtbegin_so.c
     __dso_handle.S    -> __dso_handle.c
     __dso_handle_so.S -> __dso_handle_so.c
     atexit.S          -> atexit.c

Previously __do_global_dtors_aux was in the task's __FINI_ARRAY__
linked with crtbegin.S; it is now being removed, as there is no need
to call a destructor just before terminating a process.

Shared libraries, on the other hand, are linked with
crtbegin_so.c and have a hidden destructor declared
to allow the bionic linker to call __on_dlclose().

Change-Id: Ieb4da5199b54573de05743990e309db381a11cb8
Signed-off-by: Pete Delaney  <piet.delaney@imgtec.com>
Signed-off-by: Chao-Ying Fu  <chao-ying.fu@imgtec.com>
Signed-off-by: Chris Dearman <chris.dearman@imgtec.com>
2013-07-17 14:23:29 -07:00
Elliott Hughes
2be511d405 Merge "Improve stack overflow diagnostics (take 2)." 2013-07-17 20:48:25 +00:00
Elliott Hughes
84114c8dd5 Improve stack overflow diagnostics (take 2).
This reverts commits eb1b07469f and
d14dc3b87f, and fixes the bug where
we were calling mmap (which might cause errno to be set) before
__set_tls (which is required to implement errno).

Bug: 8557703
Change-Id: I2c36d00240c56e156e1bb430d8c22a73a068b70c
2013-07-17 13:33:19 -07:00
Elliott Hughes
40e7a87864 Merge "Prevent the madvise(MADV_MERGEABLE) mmap hack from affecting errno." 2013-07-17 20:20:41 +00:00
Elliott Hughes
107cdd406b Prevent the madvise(MADV_MERGEABLE) mmap hack from affecting errno.
Bug: 9889616
Change-Id: I4a7323e0ae5aeb5cbe0da1b2bc7501d83b3a2aa4
2013-07-17 13:12:26 -07:00
Elliott Hughes
b7b36b819e Merge "mmap: Reinstate passing MADV_MERGEABLE on private anonymous maps" 2013-07-17 20:02:11 +00:00
Rom Lemarchand
e459bba398 mmap: Reinstate passing MADV_MERGEABLE on private anonymous maps
Reinstate mmap calling madvise(MADV_MERGEABLE) removed in
635df850e5

(cherry-pick of c702a904679a36511bead29c51eeac15d81f4fd2.)

Change-Id: I18803fb54701b2b3d8186dff5c678211ee3efa1f
2013-07-17 13:00:45 -07:00
Guang Zhu
2cf5a6f662 Merge "Revert "Improve stack overflow diagnostics."" 2013-07-17 03:17:19 +00:00
Guang Zhu
d14dc3b87f Revert "Improve stack overflow diagnostics."
This reverts commit aa754dca90.

Change-Id: Ifa76eee31f7f44075eb3a48554315b2693062f44
2013-07-17 03:17:05 +00:00
Guang Zhu
8c1b96681a Merge "Revert "Clean up our alternate signal stacks."" 2013-07-17 03:16:52 +00:00
Guang Zhu
eb1b07469f Revert "Clean up our alternate signal stacks."
This reverts commit 5cf87951ab.

Change-Id: Idd6ca7d80a018755da3bd315d91193723ce7f3bf
2013-07-17 03:16:04 +00:00
Christopher Ferris
f63c28f033 Merge "Fix assembler errors in generic arm strlen.c." 2013-07-17 00:19:42 +00:00
Christopher Ferris
6f4fed74cb Merge "Add new optimized strlen for arm." 2013-07-17 00:19:30 +00:00
Christopher Ferris
9ad2a73ed6 Fix assembler errors in generic arm strlen.c.
Tested using a static version of the strlen libc_test program
on a nexus7 that uses the generic code.

Merge from internal master.

(cherry-picked from d8d10a8994)

Change-Id: I88f7dc01dc5b5c3ac2d5580d92153bc1bc36c564
2013-07-16 16:47:54 -07:00
Christopher Ferris
0aa9b52efa Add new optimized strlen for arm.
This optimized version is primarily targeted at cortex-a15.

Tested on all nexus devices using the system/extras/libc_test strlen test.
Tested alignments from 1 to 32 that are powers of 2.
Tested that strlen does not cross page boundaries at all alignments.

Speed improvements listed below:

cortex-a15
- Sizes >= 32 bytes, ~75% improvement.
- Sizes >= 1024 bytes, ~250% improvement.

cortex-a9
- Sizes >= 32 bytes, ~75% improvement.
- Sizes >= 1024 bytes, ~85% improvement.

krait
- Sizes >= 32 bytes, ~95% improvement.
- Sizes >= 1024 bytes, ~160% improvement.

Merge from internal master.

(cherry-picked from 2fc0717977)

Change-Id: I1ceceb4e745fd68e9d946f96d1d42e0cdaff6ccf
2013-07-16 16:47:37 -07:00
Elliott Hughes
f35e0c149f Merge "Clean up our alternate signal stacks." 2013-07-16 22:47:41 +00:00
Elliott Hughes
5cf87951ab Clean up our alternate signal stacks.
Bug: 8557703
Change-Id: Ie93901dd1c29e9d3bf795b0f0400616d9ef08f75
2013-07-16 14:35:52 -07:00
Elliott Hughes
026867c7dc Merge "Improve stack overflow diagnostics." 2013-07-16 20:38:55 +00:00
Elliott Hughes
aa754dca90 Improve stack overflow diagnostics.
We notify debuggerd of problems by installing signal handlers. That's
fine except for when the signal is caused by us running off the end of
a thread's stack and into the guard page.

Bug: 8557703
Change-Id: I1ef65b4bb3bbca7e9a9743056177094921e60ed3
2013-07-16 13:14:24 -07:00
Elliott Hughes
9562d38df1 Merge "Clean up __builtin_expect usage." 2013-07-16 19:52:30 +00:00
Elliott Hughes
d4e753fea9 Clean up __builtin_expect usage.
Also remove some dead code; our malloc debugging doesn't use this
any more.

Change-Id: Id69cf182371f5f37d40b5bbd08f2744ade286e66
2013-07-16 12:45:46 -07:00
Elliott Hughes
f152e386fc Merge "EABI syscall cleanup." 2013-07-16 19:35:58 +00:00
Elliott Hughes
da4a3e6515 EABI syscall cleanup.
We cleaned up the auto-generated ones a while back to not touch
the stack unnecessarily if they have <= 4 arguments. This patch
cleans up some hand-crafted ones.

Also improve comments in clone.S.

Change-Id: I8850bf98f2b26829385315304472a760e6880ed8
2013-07-16 11:52:24 -07:00
Elliott Hughes
67750c8515 Merge "Fix pthread_getattr_np, pthread_attr_setguardsize, and pthread_attr_setstacksize." 2013-07-15 23:39:39 +00:00
Elliott Hughes
b95cf0d23a Fix pthread_getattr_np, pthread_attr_setguardsize, and pthread_attr_setstacksize.
pthread_getattr_np was reporting the values supplied to us, not the values we
actually used, which is kinda the whole point of pthread_getattr_np.

pthread_attr_setguardsize and pthread_attr_setstacksize were reporting EINVAL
for any size that wasn't a multiple of the system page size. This is
unnecessary. We can just round like POSIX suggests and glibc already does.

Also improve the error reporting for pthread_create failures.

Change-Id: I7ebc518628a8a1161ec72e111def911d500bba71
2013-07-15 14:51:07 -07:00
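A minimal sketch of the page-size rounding described above (hypothetical helper, not bionic's actual code):

    #include <unistd.h>

    /* Round a requested stack or guard size up to a whole number of
       pages instead of rejecting it with EINVAL. */
    static size_t round_up_to_page(size_t size) {
        size_t page_size = (size_t) sysconf(_SC_PAGESIZE);
        return (size + page_size - 1) & ~(page_size - 1);
    }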
Elliott Hughes
59ed029b28 Merge "Upgrade to tzcode2013d." 2013-07-15 17:08:41 +00:00
Elliott Hughes
ce4783ce76 Upgrade to tzcode2013d.
Well, kinda... localtime.c still contains a bunch of Android-specific
hacks, as does strftime.c. But the other files are now exactly the same
as upstream.

This catches up with several years of bug fixes, and fixes most of the
compiler warnings that were in this code. (Just two remain.)

Bug: 1744909
Change-Id: I2ddfecb6fd408c847397c17afb0fff859e27feef
2013-07-12 17:52:44 -07:00
Elliott Hughes
3db1f359e9 Merge "Add a trivial gmtime(3) test." 2013-07-12 21:23:48 +00:00
Elliott Hughes
ee178bfb79 Add a trivial gmtime(3) test.
Change-Id: I849f200a455cd6971646fa45766ab22ba19fb866
2013-07-12 11:25:20 -07:00
Elliott Hughes
8a363692be Merge "Add TCP_INFO state enum values." 2013-07-11 23:58:48 +00:00
Elliott Hughes
0dff43cab4 Add TCP_INFO state enum values.
Bug: https://code.google.com/p/android/issues/detail?id=38881
Change-Id: Ie22816c666474e6441e0ec3afd2a6eb04d64a673
2013-07-11 16:21:32 -07:00
Nick Kralevich
d30877ae28 Merge "syslog.h: add __printflike to syslog functions" 2013-07-11 15:32:49 +00:00
Nick Kralevich
6b1dd1797e syslog.h: add __printflike to syslog functions
Allow the compiler to detect formatting bugs.

Change-Id: I6a4af6cae59dc3adf14b075431a41885213a649a
2013-07-10 07:37:11 -07:00
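__printflike maps onto the compiler's format-checking attribute; a hedged sketch of the kind of annotation this commit adds:

    /* Argument 2 is a printf-style format string and the variadic
       arguments start at position 3, so the compiler can warn about
       mismatched conversions. */
    void syslog(int priority, const char* fmt, ...)
            __attribute__((__format__(printf, 2, 3)));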
Elliott Hughes
b7f4923752 Merge "Fix MIPS build." 2013-07-09 21:19:15 +00:00
Elliott Hughes
6184c1feaa Fix MIPS build.
Change-Id: I583b1794dbc12fcded8c4f57f367593a742ab33f
2013-07-09 14:18:36 -07:00
Elliott Hughes
9d476716c2 Merge "Add <sys/statvfs.h>." 2013-07-09 20:43:13 +00:00
Elliott Hughes
06040fd75c Add <sys/statvfs.h>.
Bug: 2512019
Change-Id: I6e7fd3fa281977cc4bc270481a95416b5b2dc351
2013-07-09 13:25:03 -07:00
Elliott Hughes
4fc8a0c116 Merge "Remove <netinet/icmp6.h> ni_* macros." 2013-07-08 22:08:12 +00:00
Elliott Hughes
c2cd33efc4 Remove <netinet/icmp6.h> ni_* macros.
glibc doesn't have these, and they prevent ping from building out of
the box because it assumes it can define them.

Bug: 9671560
Change-Id: I815f2a9c4fd96a0ea2952eb5a71ddf51e0763660
2013-07-08 15:07:41 -07:00
Elliott Hughes
ff145277da Merge "Upgrade to tzdata2013d." 2013-07-08 21:53:02 +00:00
Elliott Hughes
5149de09ad Upgrade to tzdata2013d.
From the release notes:

  Changes affecting future time stamps:

    Morocco's midsummer transitions this year are July 7 and August 10,
    not July 9 and August 8.  (Thanks to Andrew Paprocki.)

    Israel now falls back on the last Sunday of October.
    (Thanks to Ephraim Silverberg.)

  Changes affecting past time stamps:

    Specify Jerusalem's location more precisely; this changes the pre-1880
    times by 2 s.

  Changes affecting metadata only:

    Fix typos in the entries for country codes BQ and SX.

Change-Id: I87f992e4c406d4f254dc274f206536e9484024a2
2013-07-08 14:51:15 -07:00
Elliott Hughes
e31c45c9fa Merge "Make bionic's <netinet/icmp6.h> standalone, like glibc's." 2013-07-08 18:17:57 +00:00
Elliott Hughes
3a040d8379 Make bionic's <netinet/icmp6.h> standalone, like glibc's.
This removes the need for a bionic-specific hack in external/iproute2.

Bug: 9671560
Change-Id: I9f15be0711d26bca863bd66be9a39606500fb948
2013-07-08 11:16:24 -07:00
Elliott Hughes
6b33f31b09 Merge "Fix IPv6 filtering definitions in netinet/icmp6.h." 2013-07-08 17:32:17 +00:00
Lorenzo Colitti
bfc6a59556 Fix IPv6 filtering definitions in netinet/icmp6.h.
Linux and *BSD kernels use opposite values to indicate pass/block
in ICMPv6 filters, and assign a different value to the
ICMP6_FILTER sockopt.

Bug: 9671560
Bug: 9469682
Change-Id: Ic0f1fcd48891add992acf97632f60aebd172c1d7
2013-07-08 10:31:29 -07:00
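Typical use of the filter macros from this header looks roughly like the sketch below; the raw values behind the macros are what differ between Linux and *BSD:

    #include <netinet/in.h>
    #include <netinet/icmp6.h>
    #include <sys/socket.h>

    /* Only let ICMPv6 echo replies through on a raw ICMPv6 socket. */
    void restrict_to_echo_replies(int sock) {
        struct icmp6_filter filt;
        ICMP6_FILTER_SETBLOCKALL(&filt);
        ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
        setsockopt(sock, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
    }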
Elliott Hughes
e63ea6aca7 Merge "Clean up <sys/mount.h>/<linux/fs.h> duplication." 2013-07-08 17:08:52 +00:00
Elliott Hughes
e42e51d563 Clean up <sys/mount.h>/<linux/fs.h> duplication.
Roll on uapi...

Bug: 6340120
Change-Id: Ic9521905683946e836574e4d768e34853ea777fa
2013-07-03 14:58:04 -07:00
Nick Kralevich
84983592ad Merge "bionic_atomic_arm.h: Remove < ARMv6 support" 2013-07-03 21:25:35 +00:00
Nick Kralevich
e91f71783b bionic_atomic_arm.h: Remove < ARMv6 support
This is dead code for most modern Android devices.

Bug: 9674955
Change-Id: Ic63a66b0331a0f07b9183f14a1d5e678c25e4b12
2013-07-03 14:14:06 -07:00
Elliott Hughes
de2781d906 Merge changes I01345c23,I3b57517c
* changes:
  Include linux/termios.h from sys/ioctl.h.
  Update linux/types.h to linux 2.6.20.
2013-07-03 20:37:28 +00:00
Lorenzo Colitti
f936ef1a9f Include linux/termios.h from sys/ioctl.h.
On NetBSD and glibc, including sys/ioctl.h provides some
terminal ioctl data structures such as struct winsize. For
compatibility, provide these via sys/ioctl.h in bionic as well.
bionic does not have its own definitions for these structures, so
get them from the Linux kernel definitions.

Change-Id: I01345c23c0bebd60b0a80fc33668e7c0ad7356c3
2013-07-03 13:35:18 -07:00
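The struct winsize case mentioned above is the usual terminal-size query; a small usage sketch:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void) {
        struct winsize ws;
        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0) {
            printf("%d rows x %d cols\n", ws.ws_row, ws.ws_col);
        }
        return 0;
    }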
Lorenzo Colitti
d7b0d6e1e1 Update linux/types.h to linux 2.6.20.
This picks up the source kernel header change
I984154487c38f6fa827bd78234f7fc2e4e1c383f .

Bug: 9469682
Change-Id: I3b57517cc6442ba4d0b8003d7398617e8226233f
2013-07-03 13:15:20 -07:00
Elliott Hughes
ebc8ce1de6 Merge "libc/arch-arm/bionic/memcpy.a9.S: memcpy from cortex-strings." 2013-07-03 17:21:47 +00:00
Will Newton
2753e12af5 libc/arch-arm/bionic/memcpy.a9.S: memcpy from cortex-strings.
This memcpy code uses NEON/VFP to achieve very good performance
on ARMv7-A processors. It is specifically tuned for A15 but should
provide good performance on A9 also. It is equivalent to the code
in cortex-strings rev 116.

This patch is a follow-up to the existing gerrit change:

I7f6f77995f3ca903ad9c66d14261441667a2a935

This version includes a tweak for performance on misaligned
buffers and splits the header comment into license and
documentation sections.

Change-Id: Ibd2e23c8d8e01357ba0247be1d05192de3ceba69
Signed-off-by: Will Newton <will.newton@linaro.org>
2013-07-03 10:20:43 -07:00
Elliott Hughes
87b4286f09 Merge "Expose dn_comp and dn_expand to system C code." 2013-07-02 23:00:38 +00:00
Lorenzo Colitti
b8e435c1d5 Expose dn_comp and dn_expand to system C code.
This is needed to compile open-source code that wants to
expand/compress domain names itself, such as ping6.

Bug: 9469682
Change-Id: I339c6538936d05c031bc6fb0a8793aaf1429dea4
2013-07-02 15:57:57 -07:00
Elliott Hughes
feec97a7c7 Merge "Fix inttypes.h PRI?PTR and SCN?PTR macros." 2013-07-02 22:42:49 +00:00
Elliott Hughes
74f0833df6 Fix inttypes.h PRI?PTR and SCN?PTR macros.
Our intptr_t and uintptr_t aren't "long". Add a compilation test so we remember
to fix this to cope with 32- and 64-bit later.

Bug: http://code.google.com/p/android/issues/detail?id=57218
Change-Id: I2f816d339edb4f7d57e4418b818fb4c602093f38
2013-07-02 15:35:27 -07:00
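The point of these macros is to keep format strings width-correct whatever the pointer size; a usage sketch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
        uintptr_t p = (uintptr_t) &p;
        /* PRIxPTR expands to the right length modifier for uintptr_t,
           on both 32- and 64-bit. */
        printf("address: 0x%" PRIxPTR "\n", p);
        return 0;
    }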
Elliott Hughes
1ba3a2c269 Merge "Update icmp6.h to current NetBSD." 2013-07-02 16:40:42 +00:00
Lorenzo Colitti
ce7c404dc7 Update icmp6.h to current NetBSD.
This adds a few bits we need such as RFC 6106 support and the
definition of MLD_LISTENER_REDUCTION.

http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/netinet/icmp6.h?rev=1.47

Bug: 9469682
Change-Id: I97a4c2f0893012ce315334367c055097d0f8bb10
2013-07-02 09:38:59 -07:00
Christopher Ferris
7c14d67bc1 Merge "libc/arch-arm/bionic/memcpy.a9.S: memcpy from cortex-strings." 2013-07-01 17:29:07 +00:00
Nick Kralevich
413eef716f Merge "More FORTIFY_SOURCE functions under clang" 2013-07-01 16:57:05 +00:00
Will Newton
b61103dff4 libc/arch-arm/bionic/memcpy.a9.S: memcpy from cortex-strings.
This memcpy code uses NEON/VFP to achieve very good performance
on ARMv7-A processors. It is specifically tuned for A15 but should
provide good performance on A9 also. It is equivalent to the code
in cortex-strings rev 116.

This patch is a follow-up to the existing gerrit change:

I7f6f77995f3ca903ad9c66d14261441667a2a935

But this version includes a tweak for performance on misaligned
buffers.

Change-Id: I285abac0068f8ae29a1cbf7862ea8590aadaf0a7
Signed-off-by: Will Newton <will.newton@linaro.org>
2013-07-01 11:15:27 +01:00
Nick Kralevich
a6cde39276 More FORTIFY_SOURCE functions under clang
* bzero
* umask
* strlcat

Change-Id: I65065208e0b8b37e10f6a266d5305de8fa9e59fc
2013-06-29 08:16:22 -07:00
Nick Kralevich
227b47a461 Merge "resolv_private.h: remove #define b64_ntop and b64_pton" 2013-06-28 20:31:11 +00:00
Nick Kralevich
ca43d73d23 resolv_private.h: remove #define b64_ntop and b64_pton
This was gated off of "#ifndef ADNROID_CHANGES" (note the misspelling)
and is unconditionally defined in libc/include/resolv.h
(which this file includes). No need for duplicate definitions.

Change-Id: I00719bcf39eaa26eb96ab4274f171f3d2b5bae61
2013-06-28 13:18:32 -07:00
Nick Kralevich
394df35e64 Merge "Reorganize FORTIFY_SOURCE tests." 2013-06-28 20:05:57 +00:00
Nick Kralevich
5bcf39842e Reorganize FORTIFY_SOURCE tests.
Get rid of a lot of the duplication in the various FORTIFY_SOURCE
tests. Instead, we build 4 separate static libraries, with
4 different compile time options, and link them into the final test
binary.

Change-Id: Idb0b7cccc8dd837adb037bf4ddfe8942ae138230
2013-06-28 11:54:29 -07:00
Nick Kralevich
78a7bf998d Merge "stdio.h: enable vs?printf clang FORTIFY_SOURCE" 2013-06-27 17:13:25 +00:00
Nick Kralevich
c8ae8bd941 stdio.h: enable vs?printf clang FORTIFY_SOURCE
Enable FORTIFY_SOURCE protections under clang for the following
functions:

  * vsprintf
  * vsnprintf

and add unittests.

Change-Id: I90f8a27f7b202c78b5dd8ebf53050bf9e33496f7
2013-06-27 09:17:48 -07:00
Colin Cross
fbec57d46c Merge changes Ib496e818,I074204e9
* changes:
  bionic: add compatibility mode for properties
  bionic: use the size of the file to determine property area size
2013-06-26 00:03:24 +00:00
Colin Cross
5e9a086145 bionic: add compatibility mode for properties
Allow a new bionic to work with an old init property area by supporting
the old format.

(cherry picked from commit ad76c85b9c)

Change-Id: Ib496e818a62a5834d40c71eb4745783d998be893
2013-06-25 16:52:40 -07:00
Colin Cross
1ec20a086c bionic: use the size of the file to determine property area size
On the reader side, don't assume that the property size is PA_SIZE,
read it from the size of the file.  Allows init to use a different
property size without recompiling statically linked executables.

(cherry picked from commit 285b42a04c)

Change-Id: I074204e9e6591b35faf7c1c58fb11ec162aff7bf
2013-06-25 16:52:34 -07:00
Elliott Hughes
b116bf7628 Merge "Switch to current upstream getopt_long." 2013-06-25 22:56:59 +00:00
Rom Lemarchand
995f17e6a9 Merge "libc: add swapon and swapoff syscalls" 2013-06-25 22:25:31 +00:00
Elliott Hughes
d278b828fe Switch to current upstream getopt_long.
Change-Id: I4c646dcb8be9e88dd54d069a03bbc5fbfd92de03
2013-06-25 14:56:17 -07:00
Elliott Hughes
c76550b337 Merge "Update x86 machine/endian.h from upstream" 2013-06-25 21:20:18 +00:00
Pavel Chupin
64a4f6adf7 Update x86 machine/endian.h from upstream
After downloading the new version from upstream (OpenBSD 1.17), the
following changes were made:
 * changed all u_int* types to uint*
 * add #include <sys/types.h>

All these changes are Android-specific and had been done before for
previous version (1.14).

Bug: http://code.google.com/p/android/issues/detail?id=54465
Change-Id: Ieb44e7fce4e794d997bb00ee0dd417fb61521720
Signed-off-by: Pavel Chupin <pavel.v.chupin@intel.com>
2013-06-25 14:17:58 -07:00
Rom Lemarchand
d206b560e7 libc: add swapon and swapoff syscalls
Change-Id: Ie79dc8e3f2ff1cd427dd6d95e3850920c4b407b0
Signed-off-by: Rom Lemarchand <romlem@google.com>
2013-06-25 13:18:03 -07:00
Elliott Hughes
7d624e9aff Merge "Kernel dso support for 'dl_iterate_phdr' function" 2013-06-25 20:15:30 +00:00
Sergey Melnikov
c45087bffa Kernel dso support for 'dl_iterate_phdr' function
The kernel provides a virtual DSO with stack unwinding/exception handling
info for the signal-handling case. Stack unwinding routines use the
'dl_iterate_phdr' function to gather additional DWARF info from DSOs.
This patch enables enumeration of the virtual DSO via dl_iterate_phdr.

Signed-off-by: Sergey Melnikov <sergey.melnikov@intel.com>
Change-Id: Ic2882b28f40b456a088bc1e63c50cbfda7e4a102
2013-06-25 13:12:39 -07:00
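A minimal consumer of dl_iterate_phdr, which with this change also sees the vdso (usage sketch):

    #define _GNU_SOURCE   /* needed for <link.h> on some C libraries */
    #include <link.h>
    #include <stdio.h>

    static int print_module(struct dl_phdr_info* info, size_t size, void* data) {
        (void) size; (void) data;
        printf("%s loaded at %p (%d program headers)\n",
               info->dlpi_name, (void*) info->dlpi_addr, info->dlpi_phnum);
        return 0;  /* non-zero would stop the iteration */
    }

    void dump_modules(void) {
        dl_iterate_phdr(print_module, NULL);
    }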
Nick Kralevich
bfacb603e4 Merge "libc: enable FORTIFY_SOURCE snprintf under clang" 2013-06-25 17:26:20 +00:00
Nick Kralevich
621b19dddb libc: enable FORTIFY_SOURCE snprintf under clang
Change-Id: I8b8059782a720104722b0841994b38f873ed02aa
2013-06-25 10:02:35 -07:00
Colin Cross
1642edb520 Merge changes Ib074192d,I6df3afed,I69070455,Icbe31908,Id3fa4526,I038d451f
* changes:
  bionic: store property names as variable-length strings
  bionic: prevent root processes from calling __system_property_add
  bionic: revert to a single (larger) property area
  bionic: reimplement property area as hybrid trie/binary tree
  bionic: add missing memory barriers to system properties
  bionic: make property area expandable
2013-06-25 00:07:53 +00:00
Greg Hackmann
836dbf65e4 bionic: store property names as variable-length strings
Names are immutable, so the fixed-sized arrays can be replaced with
variable-length ones to save memory (especially on internal tree nodes).

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit 492ce95d9f)

Change-Id: Ib074192d1b71150233d78c58e9ffcf7ecf688b6b
2013-06-24 16:35:46 -07:00
Colin Cross
1d36ee1a6e bionic: prevent root processes from calling __system_property_add
If a root process other than init calls __system_property_add, which
it should never do, it will break the design assumption that there is
only one mutator.

Pass O_EXCL to open() in map_prop_region_rw to ensure that only one
process ever has the property pages open for write.

(cherry picked from commit fb9b7b436f)

Change-Id: I6df3afedbfb5d07891b095aa24b78278381a5aaf
2013-06-24 16:35:41 -07:00
Greg Hackmann
1540f601be bionic: revert to a single (larger) property area
d329697 is too complicated.  Change the multiple property pages back to
a single 128K property area that's mapped in entirely at initialization
(the memory will not get allocated until the pages are touched).

d329697 has other changes useful for testing (moving property area
initialization inside bionic and adding __system_property_set_filename)
so undo the change manually rather than with git revert.

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit 5f05348c18)

Change-Id: I690704552afc07a4dd410277893ca9c40bc13e5f
2013-06-24 16:35:37 -07:00
Greg Hackmann
996cdc4b1a bionic: reimplement property area as hybrid trie/binary tree
See the comments for an explanation of how properties are stored.

The trie structure is designed to scale better than the previous
array-based implementation.  Searching an array with n properties
required average O(n) string compares of the entire key; searching the
trie requires average O(log n) string compares of each token (substrings
between '.' characters).

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit 6ac8e6a46d)

Change-Id: Icbe31908572f33b4d9b85d5b62ac837cbd0f85e0
2013-06-24 16:35:32 -07:00
Greg Hackmann
f7511e3bc9 bionic: add missing memory barriers to system properties
1) Reading the value must finish before checking whether it's intact

2) Setting the serial's dirty bit must be visible before modifying the
value

3) The modified value must be visible before clearing the serial's dirty
bit

4) New properties and their TOC entries must be visible before updating
the property count

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit 5bfa3ee8b3)

Change-Id: Id3fa45261fc2df2ae493ab5194bc2b6bff04e966
2013-06-24 16:35:27 -07:00
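The reader side implied by barriers (1) and (3) is essentially a seqlock; a heavily simplified sketch of the pattern (not bionic's actual code), assuming a serial word whose low bit is set while a write is in progress:

    #include <stdatomic.h>
    #include <string.h>

    struct prop {
        atomic_uint serial;   /* low bit set while a writer is updating value */
        char value[128];
    };

    /* Retry until a consistent snapshot of value is observed.
       out_len must not exceed sizeof(p->value). */
    static void read_value(struct prop* p, char* out, size_t out_len) {
        unsigned before, after;
        do {
            before = atomic_load_explicit(&p->serial, memory_order_acquire);
            memcpy(out, p->value, out_len);              /* (1) read the value... */
            atomic_thread_fence(memory_order_acquire);   /* ...before rechecking  */
            after = atomic_load_explicit(&p->serial, memory_order_relaxed);
        } while ((before & 1) || before != after);
    }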
Greg Hackmann
cb215a7e9e bionic: make property area expandable
The property area is initially one 4K region, automatically expanding as
needed up to 64 regions.

To avoid duplicating code, __system_property_area_init() now allocates
and initializes the first region (previously it was allocated in init's
init_property_area() and initialized in bionic).  For testing purposes,
__system_property_set_filename() may be used to override the file used
to map in regions.

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit d32969701b)

Change-Id: I038d451fe8849b0c4863663eec6f57f6521bf4a7
2013-06-24 16:35:15 -07:00
Nick Kralevich
0ea1d5c0ae Merge "libc: enable sprintf FORTIFY_SOURCE under clang" 2013-06-24 22:15:12 +00:00
Nick Kralevich
c6eb985454 libc: enable sprintf FORTIFY_SOURCE under clang
clang doesn't support __builtin_va_arg_pack(), so we have
to use #define instead.

Change-Id: I2ee75e6267d60cdf997fee6b9b0547bf68f062a1
2013-06-24 14:10:29 -07:00
Nick Kralevich
53ddcc9070 Merge "linker: Emit a warning on text relocations" 2013-06-21 22:36:50 +00:00
Nick Kralevich
c9084427aa linker: Emit a warning on text relocations
Text relocations unnecessarily mark pages as dirty, preventing them
from being swapped out, wasting memory. Also, text relocations
prevent the code from running on certain hardened systems.

Print a message in logcat and stderr when we see a text relocation,
to encourage developers to fix their code.

Change-Id: I6051a7463911e090ae5727a355397d539669d5b9
2013-06-21 15:33:15 -07:00
Nick Kralevich
6819773103 Merge "libc_logging: don't keep file descriptors open forever" 2013-06-21 21:26:09 +00:00
Nick Kralevich
17fc25d20f libc_logging: don't keep file descriptors open forever
Avoid keeping unnecessary file descriptors around when they're not
needed. Libc doesn't log so much that opening / closing overhead
matters.

Change-Id: I590ec5c27562db9bac025f781c48ec9a7724ce77
2013-06-21 13:28:42 -07:00
Nick Kralevich
0ce28d20ea Merge "libc: enable FORTIFY_SOURCE clang strlcpy" 2013-06-20 19:27:48 +00:00
Nick Kralevich
8bafa7452e libc: enable FORTIFY_SOURCE clang strlcpy
Change-Id: Idcfe08f5afc3dde592416df9eba83f64e130c7c2
2013-06-20 12:17:44 -07:00
Elliott Hughes
4eed65090b Merge "stdint.h header is not fully compatible with C99(ISO9899:1999)" 2013-06-19 19:45:09 +00:00
Nick Kralevich
02ca0e3142 Merge "Fix FORTIFY_SOURCE unittests." 2013-06-19 17:33:57 +00:00
Colin Cross
3225f49848 Merge "bionic: add __system_property_foreach" 2013-06-19 17:26:17 +00:00
Nick Kralevich
3cd4cac2ce Fix FORTIFY_SOURCE unittests.
The compiler is too damn smart.

Change-Id: Ibef3ef41ec99f8cd9c06f1dbca535819f9a08197
2013-06-19 10:25:44 -07:00
Sergey Melnikov
dc5d3426d8 stdint.h header is not fully compatible with C99(ISO9899:1999)
stdint.h provides macros with incorrect type:
  * UINT8_C
  * UINT16_C
  * UINT8_MAX
  * UINT16_MAX

Signed-off-by: Sergey Melnikov <sergey.melnikov@intel.com>
Change-Id: I2d130c782d4485bf6c9e9f068de0bdaa4ba7303f
2013-06-19 12:33:31 +04:00
Greg Hackmann
c6ff844d75 bionic: add __system_property_foreach
find_nth() will be inefficient on a trie.  Since find_nth() is only used
internally and only for enumerating properties, we can add a foreach()
function to do this directly.

Signed-off-by: Greg Hackmann <ghackmann@google.com>

(cherry picked from commit 577418403d)

Change-Id: Iaca97d1182ce2c28863ba85241cbb5cf6185eb2f
2013-06-18 19:24:29 -07:00
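A hedged usage sketch of the new enumeration hook, assuming the callback signature shown here and the declarations in <sys/system_properties.h>:

    #include <stdio.h>
    #include <sys/system_properties.h>

    static void dump_prop(const prop_info* pi, void* cookie) {
        char name[PROP_NAME_MAX];
        char value[PROP_VALUE_MAX];
        (void) cookie;
        __system_property_read(pi, name, value);
        printf("%s=%s\n", name, value);
    }

    void dump_all_properties(void) {
        __system_property_foreach(dump_prop, NULL);
    }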
Elliott Hughes
c656d732c7 Merge "Always use v1 for MIPS TLS access." 2013-06-18 20:56:25 +00:00
Elliott Hughes
a33dc57c7c Always use v1 for MIPS TLS access.
Change-Id: Ic2850b90185cfbc5b0eff804c8b74a1c553c0852
2013-06-18 13:26:22 -07:00
Nick Kralevich
0846109c96 Merge "libc: Rename fortify error functions." 2013-06-18 20:24:04 +00:00
Nick Kralevich
dd0880fec3 Merge "libc: add limited FORTIFY_SOURCE support for clang" 2013-06-18 20:23:49 +00:00
Elliott Hughes
6807af773f Merge "Make LD_PRELOAD failures just warnings." 2013-06-18 20:23:07 +00:00
Colin Cross
02002443d4 Merge "bionic: fix deleting property arrays in property benchmark" 2013-06-18 20:22:25 +00:00
Nick Kralevich
e2fb05b45b Merge "libc: Introduce __errordecl()" 2013-06-18 20:15:33 +00:00
Elliott Hughes
7e5a8cc523 Make LD_PRELOAD failures just warnings.
This matches glibc and makes life easier for developers who want to
sometimes preload a library from init (which has no conditionals); they
can simply move/remove the library to disable.

Change-Id: I579b8633f958235af6e46bb53b378b9e363afb1f
2013-06-18 13:15:00 -07:00
Colin Cross
7d06813d93 bionic: fix deleting property arrays in property benchmark
Use delete[] to delete arrays allocated with new []

Change-Id: Icc2a6b23df09049c008f7f1f50ed93a277174308
2013-06-18 13:08:28 -07:00
Nick Kralevich
a641c18f0c libc: Rename fortify error functions.
__umask_error -> __umask_invalid_mode
__creat_error -> __creat_missing_mode
__too_many_args_error -> __creat_too_many_args

Change-Id: I4036f344a3a93628e70f2e948ad73cfed3a967ea
2013-06-18 13:07:18 -07:00
Colin Cross
0005b3544b Merge "bionic: change properties benchmarks to read a single property" 2013-06-18 20:01:05 +00:00
Colin Cross
7d90cfa6b5 bionic: change properties benchmarks to read a single property
The properties benchmarks were reading n properties from a property
area with n properties in it, which was making it hard to compare
the time between runs of different sizes.  Change the benchmark
to read a random property per iteration so the numbers between
runs are comparable.

Change-Id: Ib1648ce0948d9038fce76d209608427376cfb8da
2013-06-18 12:55:52 -07:00
Nick Kralevich
16d1af167f libc: add limited FORTIFY_SOURCE support for clang
In 829c089f83, we disabled all
FORTIFY_SOURCE support when compiling under clang. At the time,
we didn't have proper test cases, and couldn't easily create targeted
clang tests.

This change re-enables FORTIFY_SOURCE support under clang for a
limited set of functions, where we have explicit unittests available.
The functions are:

* memcpy
* memmove
* strcpy
* strncpy
* strcat
* strncat
* memset
* strlen (with modifications)
* strchr (with modifications)
* strrchr (with modifications)

It may be possible, in the future, to enable other functions. However,
I need to write unittests first.

For strlen, strchr, and strrchr, clang unconditionally calls the
fortified version of the relevant function. If it doesn't know the
size of the buffer it's dealing with, it passes in ((size_t) -1),
which is the largest possible size_t.

I added two new clang-specific unittest files, primarily copied
from fortify?_test.cpp.

I've also rebuilt the entire system with these changes, and didn't
observe any obvious problems.

Change-Id: If12a15089bb0ffe93824b485290d05b14355fcaa
2013-06-18 12:14:20 -07:00
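To illustrate the "(size_t) -1 when the size is unknown" convention described above, a simplified sketch of a fortified check (hypothetical helper, not bionic's actual implementation):

    #include <stdlib.h>
    #include <string.h>

    /* Abort if the string runs past the buffer size the compiler could
       prove; when the size is unknown the caller passes (size_t) -1 and
       the check never fires. */
    size_t checked_strlen(const char* s, size_t buf_size) {
        size_t len = strlen(s);
        if (len >= buf_size) abort();
        return len;
    }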
Nick Kralevich
b24c0637d0 libc: Introduce __errordecl()
Define __errordecl and replace __attribute__((__error__("foo")))
with __errordecl. Make sure __errordecl is a no-op on clang, as it
generates a compile time warning.

Change-Id: Ifa1a2d3afd6881de9d479fc2adac6737871a2949
2013-06-18 12:13:52 -07:00
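A sketch of what such a macro can look like (assumed form, not necessarily bionic's exact definition):

    #if defined(__clang__)
    /* clang: no usable __error__ attribute, so make it a plain declaration. */
    #define __errordecl(name, msg) extern void name(void)
    #else
    #define __errordecl(name, msg) \
        extern void name(void) __attribute__((__error__(msg)))
    #endif

    /* Used like: */
    __errordecl(__creat_missing_mode, "called with O_CREAT but no mode");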
Colin Cross
977a33137d Merge changes Iac00ce10,I192d3825
* changes:
  bionic: add tests for properties
  bionic: move system property writing from init to bionic
2013-06-18 01:11:58 +00:00
Colin Cross
b27e200ad6 bionic: add tests for properties
(cherry picked from commit 37d9f75dde)

Change-Id: Iac00ce10a4272032a1cbdbc4204277d6876e3365
2013-06-17 16:58:47 -07:00
Colin Cross
5cf32de7a0 bionic: move system property writing from init to bionic
Move the implementation of writing to the system property area
from init to bionic, next to the reader implementation.  This
will allow full property testing to be added to bionic tests.

Add new accessor and waiting functions to hide the implementation
from watchprops and various bionic users.

Also hide some of the implementation details of the property area
from init by moving them into _system_properties.h, and other details
from everybody by moving them into system_properties.h.

(cherry picked from commit dc1038b790)

Change-Id: I192d3825ee276c5047bc751039fe6cfe226a7cca
2013-06-17 16:58:43 -07:00
Elliott Hughes
5995bf880e Merge "don't hardcode register r0/v1 when reading the TLS" 2013-06-17 21:51:50 +00:00
Mathias Agopian
b6e340080a don't hardcode register r0/v1 when reading the TLS
this leads to much improved code when calling __get_tls()

Change-Id: I21d870fb33c33a921ca55c4e100772e0f7a8d1e4
2013-06-17 14:50:30 -07:00
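On ARM the TLS pointer lives in the TPIDRURO register (CP15 c13); asking for the result through an output operand lets the compiler choose the register. A hedged, ARM-only sketch:

    /* Sketch: read the thread pointer into whichever register the
       compiler prefers, instead of pinning it to r0 or v1. */
    static inline void** get_tls(void) {
        void** tls;
        __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(tls));
        return tls;
    }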
Elliott Hughes
657d0da751 Merge "Slight script cleanup; make gensyscalls work from any directory." 2013-06-17 18:16:36 +00:00
Elliott Hughes
18bc975bfe Slight script cleanup; make gensyscalls work from any directory.
Also remove a ton of dead code.

Change-Id: I1315623695a004f643b155f121cbafe24b715b8a
2013-06-17 10:39:33 -07:00
Elliott Hughes
560e9f7e7a Merge "Ensure that <stdint.h> defines SIZE_MAX and friends." 2013-06-13 23:19:09 +00:00
Elliott Hughes
7c89506e3a Ensure that <stdint.h> defines SIZE_MAX and friends.
We were missing SIG_ATOMIC_MAX, SIG_ATOMIC_MIN, SIZE_MAX,
WCHAR_MAX, WCHAR_MIN, WINT_MAX, and WINT_MIN.

Change-Id: I2535f36bc220fbaea009b483599b7af811c4cb5c
2013-06-13 16:02:53 -07:00
Elliott Hughes
c843a3e7b2 Merge "Fix the qsort copyright notice (fixed upstream this afternoon)." 2013-06-13 00:44:05 +00:00
Elliott Hughes
4eeec44e29 Fix the qsort copyright notice (fixed upstream this afternoon).
Change-Id: I786feb42719bceaa7da91565e350c1333b0d301f
2013-06-12 17:42:43 -07:00
Elliott Hughes
c843d7667a Merge "Handles spurious wake-ups in pthread_join()" 2013-06-13 00:31:50 +00:00
msg555
0f020d18b1 Handles spurious wake-ups in pthread_join()
Removed 'join_count' from pthread_internal_t and switched to using the flag
PTHREAD_ATTR_FLAG_JOINED to indicate if a thread is being joined. Combined with
a switch to a while loop in pthread_join, this fixes spurious wake-ups but
prevents a thread from being joined multiple times. This is fine for
two reasons:

1) The pthread_join specification allows for undefined behavior when multiple
   threads try to join a single thread.

2) There is no thread safe way to allow multiple threads to join a single
   thread with the pthread interface.  The second thread calling pthread_join
   could be pre-empted until the thread is destroyed and its handle reused for
   a different thread.  Therefore multi-join is always an error.

Bug: https://code.google.com/p/android/issues/detail?id=52255
Change-Id: I8b6784d47620ffdcdbfb14524e7402e21d46c5f7
2013-06-12 17:30:58 -07:00
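The "switch to a while loop" is the standard defense against spurious wake-ups: re-check the predicate after every wake-up. A generic illustration with a condition variable (not bionic's pthread_join internals):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool thread_exited = false;

    void wait_for_exit(void) {
        pthread_mutex_lock(&lock);
        /* A while loop, not an if: a wake-up that arrives while the
           predicate is still false just sends us back to waiting. */
        while (!thread_exited) {
            pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
    }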
Elliott Hughes
92e841d0aa Merge "Take upstream libm changes." 2013-06-13 00:29:21 +00:00
Elliott Hughes
78419467a2 Take upstream libm changes.
Mostly workarounds for GCC and Clang bugs.

Change-Id: I4ef428a42d4ac6d622659053711a8cc416925727
2013-06-12 16:37:58 -07:00
Elliott Hughes
6a44d2271f Merge "Take some fixed upstream copyright headers and regenerate NOTICE." 2013-06-12 23:13:34 +00:00
Elliott Hughes
2815b1dd45 Take some fixed upstream copyright headers and regenerate NOTICE.
Change-Id: Ifff41d69c13322dbc6f928ce7d4c65f76fe36772
2013-06-12 16:00:41 -07:00
Elliott Hughes
55189a0fbd Merge "Switch to current upstream stdio makebuf.c and setvbuf.c." 2013-06-12 22:56:20 +00:00
Elliott Hughes
677ee56477 Switch to current upstream stdio makebuf.c and setvbuf.c.
Change-Id: I4761b5e94459815520f01062eef39abf62af621f
2013-06-12 15:24:15 -07:00
Elliott Hughes
f1867d47cb Merge "Revert "Add SIZE_MAX to <stdint.h> in a way that might actually work."" 2013-06-12 22:19:03 +00:00
Elliott Hughes
9248d3a58c Revert "Add SIZE_MAX to <stdint.h> in a way that might actually work."
This reverts commit d8627af159 which caused build breakage:

In file included from bionic/libc/include/limits.h:86:0,
                 from bionic/libc/include/stdint.h:33,
                 from bionic/libc/arch-arm/bionic/crtbegin.c:31:
bionic/libc/include/sys/limits.h:30:26: fatal error: linux/limits.h: No such file or directory
compilation terminated.
make: *** [out/target/product/generic/obj/lib/crtbegin_dynamic1.o] Error 1

Change-Id: I128095ecb99df92626e1f57e34c61e08c98a4078
2013-06-12 22:18:47 +00:00
Elliott Hughes
55c5ec64bd Merge "Add SIZE_MAX to <stdint.h> in a way that might actually work." 2013-06-12 22:05:10 +00:00
Elliott Hughes
d8627af159 Add SIZE_MAX to <stdint.h> in a way that might actually work.
Take two.

Change-Id: I7d08b6d14c82a171312a7f5898270b4441d5cfa2
2013-06-12 14:54:16 -07:00
Elliott Hughes
944ea1a320 Merge "Clean up abort." 2013-06-12 21:53:58 +00:00
Elliott Hughes
61e699a133 Clean up abort.
* A dlmalloc usage error shouldn't call abort(3) because we want to
  cause a SIGSEGV by writing the address dlmalloc didn't like to an
  address the kernel won't like, so that debuggerd will dump the
  memory around the address that upset dlmalloc.

* Switch to the simpler FreeBSD/NetBSD style of registering stdio
  cleanup. Hopefully this will let us simplify more of the stdio
  implementation.

* Clear the stdio cleanup handler before we abort because of a dlmalloc
  corruption error. This fixes the reported bug, where we'd hang inside
  dlmalloc because the stdio cleanup reentered dlmalloc.

Bug: 9301265
Change-Id: Ief31b389455d6876e5a68f0f5429567d37277dbc
2013-06-12 14:14:53 -07:00
Elliott Hughes
5cde15eb17 Merge "<stdint.h> should expose SIZE_MAX." 2013-06-12 17:56:24 +00:00
Elliott Hughes
2c157aec9b <stdint.h> should expose SIZE_MAX.
Change-Id: Id27222c24955c83f29ad953933cbdc48b6f1e900
2013-06-12 10:28:26 -07:00
Nick Kralevich
b6e880200d Merge "fix unittests." 2013-06-11 22:52:54 +00:00
Nick Kralevich
fd0325bd98 fix unittests.
7e6ce1a3c5 fixed abort() to raise
SIGABRT rather than causing SIGSEGV. However, the unittests were
not updated.

Fix unittests.

Change-Id: I73db194127b9b9e9440358aa94273863765a736b
2013-06-11 15:45:23 -07:00
Ben Cheng
fc104f899d Merge "Fix abort(3) to raise SIGABRT rather than causing SIGSEGV." 2013-06-11 00:22:14 +00:00
Ben Cheng
7e6ce1a3c5 Fix abort(3) to raise SIGABRT rather than causing SIGSEGV.
tgkill() needs the .save stack unwinding directive to get the complete
stack trace.

BUG: https://code.google.com/p/android/issues/detail?id=16672

Change-Id: Ifb447dca2147a592c48baf32769dfc175d8aea72
2013-06-10 17:17:46 -07:00
Brian Carlstrom
8252b8e4b7 Merge "Honor p_vaddr if set" 2013-06-10 22:39:07 +00:00
Elliott Hughes
2fbc9dda34 Merge "bionic/x86: Optimization for string routines" 2013-06-08 00:45:07 +00:00
Elliott Hughes
157c42997b Merge "Revert "libc x86: Remove strcat.S"" 2013-06-08 00:42:44 +00:00
Elliott Hughes
06708df6fb Revert "libc x86: Remove strcat.S"
This reverts commit 4fe461b3a6

Change-Id: Ibeb76e24f054abd7c96ad6899366c2f9bfc2a5ad
2013-06-08 00:42:35 +00:00
Christopher Ferris
9647f797d5 Merge "Implement malloc_usable_size for debug impls." 2013-06-07 22:15:33 +00:00
Christopher Ferris
885f3b9cad Implement malloc_usable_size for debug impls.
- Implemented chk_memalign.
- Fixed a few bugs in leak_memalign.
- Implemented {leak,fill,check,qemu}_malloc_usable_size.
- Make malloc_usable_size update at run time.
- Add malloc_test.cpp as a small set of tests for the
  malloc debug routines.
- Fix the qemu routines, since they have been broken since the move to C++.
- Add support for the %u format to the out_vformat in libc_logging.cpp.
  This is used by the emulator code.

Tested using the bionic-unit-tests with setprop libc.debug.malloc
set to 1, 5, and 10.

I tested as much as possible on the emulator, but tracing doesn't appear
to be working properly.

Bug: 6143477

Merge change from internal master.

(cherry-picked from commit 3d594c2580)

Change-Id: I4ae00fffba82315a8c283f35893fd554460722fb
2013-06-07 14:55:32 -07:00
Brian Carlstrom
e7dffe150b Honor p_vaddr if set
(cherry picked from commit 88ff15c2c279d2bbe3569101b36cd2aa0931a0a9)

Change-Id: I4aabbe911d30aea8ace69e29bb6e980a4e89de90
2013-06-07 12:47:58 -07:00
Elliott Hughes
b7b4f5b838 Merge "update signal.h to be C90 compatible" 2013-06-06 01:06:41 +00:00
Erik Gilling
156ccf42ff update signal.h to be C90 compatible
sigismember, sigaddset, and sigdelset had mixed code and declarations
which are not allowed in C90 and before.

Change-Id: I662af944fc1489e34bed228ce592e41f50d00e17
Signed-off-by: Erik Gilling <konkers@android.com>
2013-06-05 18:05:36 -07:00
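For reference, the C90 restriction in question is that all declarations must precede statements in a block; an illustration with hypothetical helpers:

    #include <signal.h>

    /* Mixed declarations and code: fine in C99, rejected by strict C90. */
    int sigint_blocked_c99(void) {
        sigset_t set;
        sigprocmask(SIG_BLOCK, NULL, &set);       /* statement            */
        int blocked = sigismember(&set, SIGINT);  /* declaration after it */
        return blocked;
    }

    /* C90-clean: declare everything first, then execute statements. */
    int sigint_blocked_c90(void) {
        sigset_t set;
        int blocked;
        sigprocmask(SIG_BLOCK, NULL, &set);
        blocked = sigismember(&set, SIGINT);
        return blocked;
    }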
Elliott Hughes
18af450393 Merge "Ensure header files using __BEGIN_DECLS include sys/cdefs.h." 2013-06-06 01:04:13 +00:00
Elliott Hughes
36fa67bcdd Ensure header files using __BEGIN_DECLS include sys/cdefs.h.
We keep fixing these one-by-one; let's fix them all at once.

Found thus:

  find . -name *.h | xargs grep -L sys/cdefs.h | xargs grep -l BEGIN_DECL | xargs grep -L sys/types

Change-Id: I188842aa2484dc6176e96556d57c38a0f785b59b
2013-06-05 17:58:08 -07:00
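For context, __BEGIN_DECLS/__END_DECLS come from <sys/cdefs.h> and expand roughly as follows, which is why any header that uses them must pull in cdefs first (hedged sketch):

    #if defined(__cplusplus)
    #define __BEGIN_DECLS extern "C" {
    #define __END_DECLS   }
    #else
    #define __BEGIN_DECLS
    #define __END_DECLS
    #endif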
Elliott Hughes
c5bfb62433 Merge "sys/personality.h: include <sys/cdefs.h>" 2013-06-06 00:28:07 +00:00
Elliott Hughes
8d36050ac2 Merge "Clean up useless declaration in thread_private.h" 2013-06-06 00:00:40 +00:00
Kito Cheng
328223230b sys/personality.h: include <sys/cdefs.h>
Change-Id: Ia3fa558a38e0cffe5287bc454e85d5d3bdaa6ba1
2013-06-05 11:30:17 +08:00
Kito Cheng
94d0daa2dd Clean up useless declaration in thread_private.h
Change-Id: Ie7bcdc7195a3fcbcd09a95f73b0c49e8897ad50b
2013-06-05 11:26:24 +08:00
Elliott Hughes
4c001859fe Merge "Fix declaration of malloc_usable_size()" 2013-06-05 01:58:26 +00:00
Nick Kralevich
a24e81efd3 Merge "Add tests for __strcpy_chk()" 2013-06-04 19:05:33 +00:00
Nick Kralevich
13476deec4 Add tests for __strcpy_chk()
Change-Id: I5675d04fcd471732c1b87b83879a54fbcd27762e
2013-06-04 11:55:08 -07:00
Brian Carlstrom
0671393072 Merge "Small cleanup of soinfo_elf_lookup." 2013-06-04 03:18:23 +00:00
Christopher Ferris
6bec5b792a Small cleanup of soinfo_elf_lookup.
- Remove unnecessary line.
- Move declarations to first use.

Change-Id: I1d8398d6c13f7cb86bffe0b68af849e35a4b234d
2013-06-03 20:15:14 -07:00
Ben Cheng
404d491eb6 Merge "Use bl instead of blx to support interworking properly." 2013-05-31 21:40:37 +00:00
Ben Cheng
a123b5d319 Use bl instead of blx to support interworking properly.
(cherry picked from commit 9e1905794b in
master)

Change-Id: I9b8c35ea9e201e00f84315f9f105013c23c94d85
2013-05-31 14:39:23 -07:00
Nick Kralevich
8539961ff2 Merge "FORTIFY_SOURCE: strcat / strncat optimize" 2013-05-31 18:13:23 +00:00
Nick Kralevich
cf870199d5 FORTIFY_SOURCE: strcat / strncat optimize
__strcat_chk and __strncat_chk are slightly inefficient,
because they end up traversing over the same memory region
two times.

This change optimizes __strcat_chk / __strncat_chk so they
only access the memory once. Although I haven't benchmarked these
changes, it should improve the performance of these functions.

__strlen_chk - expose this function, even if -D_FORTIFY_SOURCE
isn't defined. This is needed to compile libc itself without
-D_FORTIFY_SOURCE.

Change-Id: Id2c70dff55a276b47c59db27a03734d659f84b74
2013-05-31 09:07:46 -07:00
Liubov Dmitrieva
0a490665a3 bionic/x86: Optimization for string routines
Optimized strcpy, strcat,
strncpy, strncat, strlcpy, strlcat,
memchr, memrchr, strchr, strrchr, index,
strnlen, strlen, wcslen, wmemcmp, wcscmp,
wcschr, wcsrchr, wcscpy, wcscat

Change-Id: I82b29132edf9a2e144e0bb3ee4ff5217df8d2a6d
Signed-off-by: Liubov Dmitrieva <liubov.dmitrieva@intel.com>
2013-05-31 13:37:03 +04:00
Nick Kralevich
72f59c84fd Merge "Add strncpy FORTIFY_SOURCE tests." 2013-05-30 20:33:02 +00:00
Nick Kralevich
8cc145edf4 Add strncpy FORTIFY_SOURCE tests.
Change-Id: Id108b1d72b44d7e5fb911268e80bbdf896808f60
2013-05-30 13:21:14 -07:00
Nick Kralevich
d515f46888 Merge "libc x86: Remove strcat.S" 2013-05-30 19:59:38 +00:00
Nick Kralevich
4fe461b3a6 libc x86: Remove strcat.S
This file is never used.

Change-Id: Iae4bba4a5a12a86a775af05e7477fb7b6511480b
2013-05-30 11:15:40 -07:00
Nick Kralevich
8d79fd1c94 Merge "libc x86: Remove index.S, strcpy.S, strchr.S" 2013-05-24 18:34:57 +00:00
Nick Kralevich
e4c4ada580 libc x86: Remove index.S, strcpy.S, strchr.S
These files are never used.

Change-Id: Iab8474bdff3bd4d225597c62b3c0f0849f808818
2013-05-24 10:50:05 -07:00
Nick Kralevich
2317275f85 Merge "libc: delete x86 memchr.S" 2013-05-24 17:27:47 +00:00
Nick Kralevich
615684c256 libc: delete x86 memchr.S
This file is never used.

Change-Id: Ief08ad176713b5194048852609613801969e1364
2013-05-24 08:52:04 -07:00
David 'Digit' Turner
25a87f7641 Fix declaration of malloc_usable_size()
The function should take a 'const void*' parameter, instead of 'void*'.
Note that the implementation in upstream-dlmalloc/malloc.c already does
this.

For context, see http://b.android.com/55725

Change-Id: Iefd55cdb8996699189e0545f9195972490306227
2013-05-23 10:02:02 +02:00
Brian Carlstrom
f5f29de6ae Merge "Fix bionic linker to support segments with zero p_filesz" 2013-05-21 23:59:52 +00:00
Brian Carlstrom
82dcc7910d Fix bionic linker to support segments with zero p_filesz
(cherry picked from commit 96362fb9d11beef6233aa03db396f25688e70860)

Change-Id: Ib075a6dfc45d5d0746d8b278f317dd9b8d772f2a
2013-05-21 16:57:55 -07:00
Erik Gilling
2e317075b0 Merge "libc/arm: add cortex-a8 cpu variant" 2013-05-16 19:57:28 +00:00
Rom Lemarchand
22bda4bd67 libc/arm: add cortex-a8 cpu variant
Change-Id: I30e8dd6d4b2e7889aea8f5ed21182a5941bfb489
2013-05-15 20:13:28 -07:00
Elliott Hughes
5217fb528c Merge "Fix sysconf(3) for _SC_GETGR_R_SIZE_MAX and _SC_GETPW_R_SIZE_MAX." 2013-05-15 00:56:06 +00:00
Elliott Hughes
d35106fd55 Fix sysconf(3) for _SC_GETGR_R_SIZE_MAX and _SC_GETPW_R_SIZE_MAX.
Change-Id: Ie16cb21c1a5a2bdce8502b5974e2c4dbb1d679ce
2013-05-14 17:20:34 -07:00
Elliott Hughes
6f502bc743 Merge "Add more __restricts, clean up __format__ attributes." 2013-05-14 23:45:49 +00:00
Elliott Hughes
d04c183979 Add more __restricts, clean up __format__ attributes.
Change-Id: I7e2d270cc722d339d221eaea92747eaff3b51403
2013-05-14 16:08:43 -07:00
Elliott Hughes
349ef893e8 Merge "Pull upstream FreeBSD revision 249810." 2013-05-14 23:06:34 +00:00
Elliott Hughes
b2e1abda05 Pull upstream FreeBSD revision 249810.
Fix license clause numbering.

Convert libc/stdio from K&R to ANSI C.

And add '__restrict' where it appeared in the header prototypes.

Change-Id: I5fdb22f79d3effa2298d03f9aa8412b4b087da04
Upstream: http://svnweb.freebsd.org/base?view=revision&revision=249810
2013-05-14 15:02:16 -07:00
Elliott Hughes
5b7b2809a2 Merge "epoll: add EPOLLRDHUP, EPOLLWAKEUP, and EPOLLONESHOT events" 2013-05-14 21:49:09 +00:00
Todd Poynor
b0a2fc35d6 epoll: add EPOLLRDHUP, EPOLLWAKEUP, and EPOLLONESHOT events
(cherry-pick of d1ad4f6dab06189d4d3dcfa19ae4bc301481eb3f.)

Change-Id: I4a8476bff068951533d4188de94097c8b84bc489
2013-05-14 14:48:43 -07:00
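A small usage sketch of the newly exposed event bits (EPOLLRDHUP reports a peer half-close, EPOLLONESHOT disarms the fd after one event):

    #include <sys/epoll.h>

    /* Watch a socket for readable data and peer hang-up, one-shot. */
    int watch_socket(int epfd, int sock) {
        struct epoll_event ev;
        ev.events = EPOLLIN | EPOLLRDHUP | EPOLLONESHOT;
        ev.data.fd = sock;
        return epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
    }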
Elliott Hughes
f0f4fa3fb1 Merge "libc: add timerfd calls" 2013-05-14 21:45:29 +00:00
Todd Poynor
4200e6203a libc: add timerfd calls
(cherry-pick of 04c0ac14a49e0969333008a9522b64046d58fbdc.)

Change-Id: I06d0b6c2a8781602362b81f48faf1cca76b9ec05
2013-05-14 14:45:02 -07:00
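Typical use of the timerfd interface those syscalls back (usage sketch):

    #include <sys/timerfd.h>
    #include <time.h>

    /* Create a timer fd that first fires after 5s, then every second;
       each expiry is consumed by read()ing a uint64_t count from the fd. */
    int make_periodic_timer(void) {
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);
        struct itimerspec spec = {
            .it_value    = { .tv_sec = 5, .tv_nsec = 0 },
            .it_interval = { .tv_sec = 1, .tv_nsec = 0 },
        };
        timerfd_settime(fd, 0, &spec, NULL);
        return fd;
    }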
Elliott Hughes
e1c58ab5bb Merge "libc: remove obsolete CLOCK_REALTIME_HR and CLOCK_MONOTONIC_HR" 2013-05-14 21:44:28 +00:00
Todd Poynor
5c4340b2ab libc: remove obsolete CLOCK_REALTIME_HR and CLOCK_MONOTONIC_HR
Add CLOCK_MONOTONIC_RAW, CLOCK_REALTIME_COARSE, and CLOCK_MONOTONIC_COARSE
as supported by recent linux kernels.

(cherry-pick of 60e5144ca312b210b54ac8e6966108da0c97ff80.)

Bug: 8895727
Change-Id: If79a4d05d1301108f49a37588f9416c4be19277a
2013-05-14 14:43:59 -07:00
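Reading one of the listed clocks (usage sketch):

    #include <stdio.h>
    #include <time.h>

    int main(void) {
        struct timespec ts;
        /* CLOCK_MONOTONIC_RAW: monotonic time without NTP rate adjustment. */
        if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0) {
            printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
        }
        return 0;
    }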
Elliott Hughes
7cb82791ca Merge "libc: add clock ids CLOCK_REALTIME_ALARM and CLOCK_BOOTTIME_ALARM" 2013-05-14 21:43:37 +00:00
Todd Poynor
23b9fd2c1d libc: add clock ids CLOCK_REALTIME_ALARM and CLOCK_BOOTTIME_ALARM
(cherry-pick of b928bda83d4413b703329f607e2706602f15293f.)

Change-Id: Ica6aad84299819ffc5e57ae4891e057d2e401fa1
2013-05-14 14:43:09 -07:00
Elliott Hughes
d8a9cccb4d Merge "Fix all printf warnings in res_send.c." 2013-05-14 17:10:21 +00:00
Kito Cheng
bb0b09cad6 Fix all printf warnings in res_send.c.
Change-Id: I4bf959140b5a5475897bd80704e64e3c4645fc3f
2013-05-14 10:09:23 -07:00
Elliott Hughes
98f7659d7e Merge "Don't fail to run DT_INIT and DT_INIT_ARRAY constructors if a shared library has DT_PREINIT_ARRAY constructors." 2013-05-09 22:30:21 +00:00
Elliott Hughes
8147d3c284 Don't fail to run DT_INIT and DT_INIT_ARRAY constructors if a shared library has DT_PREINIT_ARRAY constructors.
The GNU dynamic linker silently ignores a DT_PREINIT_ARRAY section
in a shared library. We had ineffectual code that tried to report
an error, which I tried to fix but got wrong --- my version still
wouldn't report the error to the caller, but would prevent us from
continuing to call constructors.

Bug: 8825226
Change-Id: I4fd8450ecc44d8767a1cb808aeecfbfbfc77c070
2013-05-09 15:29:54 -07:00
Ben Cheng
ff220f7003 Merge "Remove a spurious FIXME and unnecessary type cast." 2013-05-07 23:56:11 +00:00
Ben Cheng
63dd03cced Remove a spurious FIXME and unnecessary type cast.
Change-Id: I05dcefdec7f047bef7eef5c5ceb7453992d56c24
2013-05-07 16:53:33 -07:00
Ben Cheng
b9256adab3 Merge "Adjust PC value in ARM stack trace." 2013-05-07 22:45:42 +00:00
Ben Cheng
52171b9bdc Adjust PC value in ARM stack trace.
-2 for Thumb BLX(2) or -4 for the rest.

Change-Id: I804fdabfa1db4709bede222d4b432e8d42d53167
2013-05-07 15:44:13 -07:00
Nick Kralevich
d541ba1719 Merge "Use restrict pointers for various libc functions." 2013-05-07 17:37:48 +00:00
Nick Kralevich
1c462b7a04 Use restrict pointers for various libc functions.
All the cool kids say this is the best thing since sliced bread.
http://cellperformance.beyond3d.com/articles/2006/05/demystifying-the-restrict-keyword.html

For the most part, these changes match what glibc does.

Change-Id: I176268f27f82800162fe5f2515b08d5469ea2dfe
2013-05-07 10:00:21 -07:00
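The result is prototypes like the following, which promise the compiler that the two pointers never alias (hypothetical names for illustration):

    #include <stddef.h>

    /* With __restrict the compiler may assume dst and src do not overlap,
       allowing more aggressive load/store scheduling. */
    void* copy_bytes(void* __restrict dst, const void* __restrict src, size_t n);
    char* copy_string(char* __restrict dst, const char* __restrict src);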
Nick Kralevich
b01f7afd5f Merge "Use __predict_false on some fortify methods." 2013-05-02 21:52:19 +00:00
Nick Kralevich
532d6f09b1 Use __predict_false on some fortify methods.
Give the compiler some hints that these error conditions
are unlikely to occur in practice.

Change-Id: Ifaf7322a12120ef663c8315c1a18c2dcbe4bda23
2013-05-02 14:31:51 -07:00
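__predict_false is the <sys/cdefs.h> wrapper around __builtin_expect; roughly (hedged sketch):

    #define __predict_true(exp)  __builtin_expect((exp) != 0, 1)
    #define __predict_false(exp) __builtin_expect((exp) != 0, 0)

    /* Example: mark the error branch as the unlikely one.
       if (__predict_false(dst_len < src_len)) abort();      */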
Nick Kralevich
e4ac8feb58 Merge "libc: cleanup strchr" 2013-05-02 21:17:16 +00:00
Nick Kralevich
4f40e511b0 libc: cleanup strchr
Move strchr to a .cpp file, and change to bionic directory.

Change-Id: I64ade7df326c0a9a714aca4caf5647b6833b1c97
2013-05-02 13:58:03 -07:00
Nick Kralevich
5e3b502b6b Merge "keep test names consistent with other tests." 2013-05-01 22:06:15 +00:00
Nick Kralevich
277226bf43 keep test names consistent with other tests.
Change-Id: I23dc4d963af40406b270af83cd17f6c8c95f1de3
2013-05-01 15:05:01 -07:00
Nick Kralevich
8c00c91aa0 Merge "add strrchr -D_FORTIFY_SOURCE=2 test." 2013-05-01 22:01:20 +00:00
Nick Kralevich
80541922e3 add strrchr -D_FORTIFY_SOURCE=2 test.
Change-Id: I1b95bb0086ae9f2f506f3cc90cee834c0ce3b1d8
2013-05-01 14:55:33 -07:00
Nick Kralevich
bee0ab16e4 Merge "libc: upgrade strrchr to FORTIFY_SOURCE=2" 2013-04-30 23:13:04 +00:00
Nick Kralevich
3b2e6bc9ac libc: upgrade strrchr to FORTIFY_SOURCE=2
Change-Id: I4c34c2ce22c5092c4446dc1ab55f37604c1c223f
2013-04-30 14:19:23 -07:00
Nick Kralevich
c46871302e Merge "libc: upgrade some libc functions to _FORTIFY_SOURCE=2" 2013-04-30 18:51:22 +00:00
Nick Kralevich
9020fd503c libc: upgrade some libc functions to _FORTIFY_SOURCE=2
Upgrade the following functions:

* vsnprintf
* vsprintf
* snprintf
* fgets
* strcpy
* strcat
* strncat
* strlcpy
* strlcat
* strlen
* strchr

Change-Id: Icc036fc7f0bb317e05f7c051617887a1601271aa
2013-04-30 11:31:35 -07:00
Nick Kralevich
b94b2851d7 Merge "libc: upgrade sprintf to _FORTIFY_SOURCE=2" 2013-04-29 23:50:46 +00:00
Nick Kralevich
78d6d9888c libc: upgrade sprintf to _FORTIFY_SOURCE=2
Upgrade sprintf to fortify_source level 2, to catch
additional security bugs.

Change-Id: Ibc957d65e4cb96152de84b3745a04e00fa22659e
2013-04-29 16:41:54 -07:00
Nick Kralevich
382a775378 Merge "strncpy: implement _FORTIFY_SOURCE=2" 2013-04-29 23:07:33 +00:00
Nick Kralevich
1aae9bd170 strncpy: implement _FORTIFY_SOURCE=2
Add support for fortify source level 2 to strncpy.
This will enable detection of more areas where strncpy
is used inappropriately. For example, this would have detected
bug 8727221.

Move the fortify_source tests out of string_test.cpp, and
put it into fortify1_test.cpp.

Create a new fortify2_test.cpp file, which copies all
the tests in fortify1_test.cpp, and adds fortify_source level
2 specific tests.

Change-Id: Ica0fba531cc7d0609e4f23b8176739b13f7f7a83
2013-04-29 15:22:10 -07:00
Nick Kralevich
c6dc62f09c Merge "[NETFILTER]: Fix iptables ABI breakage" 2013-04-29 19:44:30 +00:00
Nick Kralevich
e66ad7809e [NETFILTER]: Fix iptables ABI breakage
Pick up Linux kernel patch 2748e5dec7ca8a3804852c7c4171f9156384d15c
from 2007

http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=2748e5dec7ca8a3804852c7c4171f9156384d15c

[NETFILTER]: Fix iptables ABI breakage on (at least) CRIS
With the introduction of x_tables we accidentally broke compatibility
by defining IPT_TABLE_MAXNAMELEN to XT_FUNCTION_MAXNAMELEN instead of
XT_TABLE_MAXNAMELEN, which is two bytes larger.

On most architectures it doesn't really matter since we don't have
any tables with names that long in the kernel and the structure
layout didn't change because of alignment requirements of following
members. On CRIS however (and other architectures that don't align
data) this changed the structure layout and thus broke compatibility
with old iptables binaries.

Changing it back will break compatibility with binaries compiled
against recent kernels again, but since the breakage has only been
there for three releases this seems like the better choice.

Change-Id: Ie9552b25892109c7042b9752132dc8ebd3552dc3
2013-04-29 12:31:01 -07:00
Andrew Hsieh
f7153fd13f Merge "Remove redundant space within square brackets" 2013-04-26 01:54:38 +00:00
Elliott Hughes
9ff1ffd805 Merge "Improve diagnostics in the face of bad tzdata." 2013-04-25 22:31:14 +00:00
Elliott Hughes
e7aaad8b83 Improve diagnostics in the face of bad tzdata.
Bug: 8373554
Change-Id: If8df5e956105e01cce95221ff0a7fa9d2b474db3
2013-04-25 14:02:59 -07:00
Nick Kralevich
5f28fde8ae Merge "linker: only re-open std* for setuid programs." 2013-04-25 20:31:41 +00:00
Nick Kralevich
8d3e91d4f8 linker: only re-open std* for setuid programs.
get_AT_SECURE() was getting called before linker_env_init() had
been called, and returning the default value ("true"). This was
causing us to reopen closed stdin, stdout, and stderr for ALL
processes, not just privileged (setuid) processes.

Calling path:
  - __linker_init
    - soinfo_link_image
      - get_AT_SECURE
    - __linker_init_post_relocation
      - linker_env_init

This change restores the intended behavior of only re-opening
stdin, stdout, and stderr for privileged processes.

Change-Id: I8b085ea6597710ac4c1a3c93f1bf8b81eecb08c0
2013-04-25 13:15:24 -07:00
Andrew Hsieh
e8f46e8edd Remove redundant space within square brackets
The new "as" in binutils-2.23 (with gcc4.8) is more picky:
it expects register right after [

Change-Id: I876124841582070ab2083ffafe38bc333b5812d0
2013-04-25 15:05:03 +08:00
Elliott Hughes
87efcd2e63 Merge "Don't avoid IPv6 when looking for new tzdata." 2013-04-24 17:52:29 +00:00
Elliott Hughes
85aeb49144 Don't avoid IPv6 when looking for new tzdata.
Our internal IPv6 FTP networking problems have been fixed.

Change-Id: I9949a13fa20a3b0f3845e565e1461320078d3f14
2013-04-24 10:50:45 -07:00
Elliott Hughes
c705daa0a2 Merge "Disable IPv6 when looking for tzdata updates." 2013-04-22 23:41:37 +00:00
Elliott Hughes
21da42ea91 Disable IPv6 when looking for tzdata updates.
My problems connecting to ftp.iana.org are only via IPv6.

Change-Id: I42e4bae7981ec4b64822f745a7a15544d77ef22d
2013-04-22 13:44:50 -07:00
Elliott Hughes
2c60c18c50 Merge "Upgrade to tzdata2013c." 2013-04-22 19:10:08 +00:00
Elliott Hughes
bda2fb5efa Merge "Add signature checking to HTTP tzdata updates." 2013-04-22 19:03:06 +00:00
Elliott Hughes
676e66db25 Add signature checking to HTTP tzdata updates.
Change-Id: Idcfd217eb215d6a170e6884be8d8ad28cd4fe70d
2013-04-22 11:41:57 -07:00
Elliott Hughes
2379088a90 Upgrade to tzdata2013c.
From the release notes:

  Changes affecting current and future time stamps:

    Palestine observed DST starting March 29, 2013.  (Thanks to
    Steffen Thorsen.)  From 2013 on, Gaza and Hebron both observe DST,
    with the predicted rules being the last Thursday in March at 24:00
    to the first Friday on or after September 21 at 01:00.

    Assume that the recent change to Paraguay's DST rules is permanent,
    by moving the end of DST to the 4th Sunday in March every year.
    (Thanks to Carlos Raúl Perasso.)

  Changes affecting past time stamps:

    Fix some historical data for Palestine to agree with that of
    timeanddate.com, as follows:

          The spring 2008 change in Gaza and Hebron was on 00:00 Mar 28, not
          00:00 Apr 1.

          The fall 2009 change in Gaza and Hebron on Sep 4 was at 01:00, not
          02:00.

          The spring 2010 change in Hebron was 00:00 Mar 26, not 00:01 Mar 27.

          The spring 2011 change in Gaza was 00:01 Apr 1, not 12:01 Apr 2.

          The spring 2011 change in Hebron on Apr 1 was at 00:01, not 12:01.

          The fall 2011 change in Hebron on Sep 30 was at 00:00, not 03:00.

    Fix times of habitation for Macquarie to agree with the Tasmania
    Parks & Wildlife Service history, which indicates that permanent
    habitation was 1899-1919 and 1948 on.

  Changes affecting metadata only:

    Macquarie Island is politically part of Australia, not Antarctica.
    (Thanks to Tobias Conradi.)

    Sort Macquarie more-consistently with other parts of Australia.
    (Thanks to Tim Parenti.)

Change-Id: I3df146d046eda26dbc6ce2a0a26ad9214ec4eaca
2013-04-22 11:33:14 -07:00
Elliott Hughes
70e0bd3a44 Merge "Rename the tzdata update tool, and add HTTP support." 2013-04-22 18:28:49 +00:00
Elliott Hughes
f8dff7d449 Rename the tzdata update tool, and add HTTP support.
The FTP server is currently unavailable, but the HTTP server is working
fine.

Change-Id: If7f2f396e89aca022a60af531f3604523b7bf95c
2013-04-22 11:11:43 -07:00
Elliott Hughes
8c181aa8fe Merge "Use hidden visibility for internal-use-only functions" 2013-04-15 17:51:24 +00:00
Christopher Ferris
516a897053 Merge "Rewrite memset for cortexa15 to use strd." 2013-04-12 19:16:44 +00:00
Elliott Hughes
b3c8c4d865 Merge "Switch to current FreeBSD qsort." 2013-04-12 19:12:29 +00:00
Christopher Ferris
796cbe249b Rewrite memset for cortexa15 to use strd.
Merge from internal master.

(cherry-picked from commit 7ffad9c120)

Change-Id: Ia67f2a545399f4fa37b63d5634a3565e4f5482f9
2013-04-12 10:58:25 -07:00
Kito Cheng
ea489745dc Use hidden visibility for internal-use-only functions
- malloc_debug_init and malloc_debug_fini

Change-Id: I4261ff428a50d8f0371415cda71bcc0a9487ce67
2013-04-12 16:34:38 +08:00
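For readers unfamiliar with the mechanism, a hidden-visibility declaration looks like the sketch below (the function name is made up; bionic wraps the attribute in its own macros):

    /* Hidden visibility keeps an internal-use-only function out of
     * libc.so's exported (dynamic) symbol table. */
    __attribute__((visibility("hidden"))) void __libc_internal_helper(void);

    void __libc_internal_helper(void) {
      /* ... callable from inside the library, invisible to its users ... */
    }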
Elliott Hughes
0b25f633a2 Switch to current FreeBSD qsort.
Change-Id: Ic46cd0b663dc5fa78c99dd38db0bfe849a25e789
2013-04-11 18:08:34 -07:00
Elliott Hughes
ed36d95fac Merge "Start moving to current FreeBSD stdio." 2013-04-11 21:18:14 +00:00
Elliott Hughes
6b05c8e280 Start moving to current FreeBSD stdio.
This only touches the easy stuff.

Change-Id: Iecee57f1681dba5c56bff59f0e9a89811a71f0ca
2013-04-11 13:55:01 -07:00
Elliott Hughes
b632857a50 Merge "Add missing include and function prototype for sched_getaffinity." 2013-04-11 00:45:33 +00:00
Kito Cheng
4ca685e36e Add missing include and function prototype for sched_getaffinity.
Change-Id: I649633c8d074def5d05bd0c8b92fb680d9d11d51
2013-04-10 17:44:29 -07:00
Christopher Ferris
fc76c7d394 Merge "Add missing branch in memcpy.S dst aligned case." 2013-04-11 00:23:46 +00:00
Christopher Ferris
bf0d1ad72b Add missing branch in memcpy.S dst aligned case.
Merge from internal master.

(cherry-picked from commit 6ffaa931c3)

Change-Id: Ifdcf01fd122866cf0d4c5b5f7a997803561d7889
2013-04-10 17:21:29 -07:00
Christopher Ferris
68fd78efa0 Merge "Update to latest cortexa15 memcpy code." 2013-04-11 00:18:04 +00:00
Christopher Ferris
185ce72d00 Update to latest cortexa15 memcpy code.
This uses the new code originally submitted as memcpy.a15.S as
the base. However, the old code handled unaligned src/dst better
so that was spliced in. I optimized the original unaligned code by
removing a few unnecessary instructions. I optimized the a15 code by
rewriting the pre and post code. I also modified the main loop to add
a pld so that larger copies would not stall waiting for memory.

Test cases for the new memcpy:

- Copy all sized values from 0 to 1024 bytes, using whatever alignment
  is returned by malloc.
For each alignment case described below, the test copied from 0 to 128
bytes.
- Src and dst pointers are both aligned to the same value, starting
  at one going through every power of two up to and including 128.
- Src aligned to double word boundary, dst aligned to word boundary.
- Src aligned to word boundary, dst aligned to double word boundary.
- Src aligned to 16 bit boundary, dst aligned to word boundary.
- Src aligned to word boundary, dst aligned to 16 byte boundary.
- Src aligned to word boundary, dst aligned to 1 byte from a word
  boundary.
- Src aligned to word boundary, dst aligned to 2 bytes from a word
  boundary.
- Src aligned to word boundary, dst aligned to 3 bytes from a word
  boundary.
- Src aligned to 1 byte from a word boundary, dst aligned to a word
  boundary.
- Src aligned to 2 bytes from a word boundary, dst aligned to a word
  boundary.
- Src aligned to 3 bytes from a word boundary, dst aligned to a word
  boundary.

Cases to verify the unaligned source code properly aligns to a 16 bit
boundary.
- Src aligned to 1 byte from a 128 bit boundary, dst aligned to
  4 + 128 bit boundary.
- Src aligned to 1 byte from a 128 bit boundary, dst aligned to
  8 + 128 bit boundary.
- Src aligned to 1 byte from a 128 bit boundary, dst aligned to
  12 + 128 bit boundary.
- Src aligned to 1 byte from a 128 bit boundary, dst aligned to
  16 + 128 bit boundary.

In all cases, a two byte fencepost was placed at the end of the
destination to verify that only the requested number of bytes were copied.

Bug: 8005082

Merge from internal master.

(cherry-picked from commit 21ede92d79)

Change-Id: Ief70c9e6dc8c6473ae245b6570b2c266fed9618c
2013-04-08 18:13:35 -07:00
Elliott Hughes
240fb8623b Merge "Make abort messages available to debuggerd." 2013-04-05 18:25:21 +00:00
Elliott Hughes
0d787c1fa1 Make abort messages available to debuggerd.
This adds __libc_fatal, cleans up the internal logging code a bit more,
and switches suitable callers over to __libc_fatal. In addition to logging,
__libc_fatal stashes the message somewhere that the debuggerd signal handler
can find it before calling abort.

In the debuggerd signal handler, we pass this address to debuggerd so that
it can come back with ptrace to read the message and present it to the user.

Bug: 8531731
Change-Id: I416ec1da38a8a1b0d0a582ccd7c8aaa681ed4a29
2013-04-05 11:24:19 -07:00
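A minimal sketch of the idea, with hypothetical names (bionic's real entry point is __libc_fatal and its internals differ): format the message into a static buffer, publish its address in a global that a crash handler can read via ptrace, then abort.

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    char* g_abort_message = NULL;  /* a post-mortem tool can read this address */

    static void fatal_message(const char* fmt, ...) {
      static char buf[512];        /* no allocation: must work when malloc is broken */
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      g_abort_message = buf;       /* publish the message before dying */
      fputs(buf, stderr);
      abort();
    }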
Elliott Hughes
014c75c78b Merge "Prevent name conflict for eventfd.cpp and eventfd.s when building libc.a" 2013-04-03 18:11:30 +00:00
Kito Cheng
8baa929d5d Prevent name conflict for eventfd.cpp and eventfd.s when building libc.a
- eventfd.cpp and eventfd.s will output to the same file when building libc.a
   out/target/product/*/obj/STATIC_LIBRARIES/libc_intermediates/WHOLE/libc_common_objs/eventfd.o
 - And then `eventfd` will be undefined when statically linked to libc.

Also add a unit test.

Change-Id: Ib310ade3256712ca617a90539e8eb07459c98505
2013-04-03 11:10:37 -07:00
Elliott Hughes
14c840df90 Merge "Fix the SYSCALLS.TXT documentation and remove a dead script." 2013-04-03 17:22:18 +00:00
Elliott Hughes
a51916b58b Fix the SYSCALLS.TXT documentation and remove a dead script.
We use the system call constants from the kernel header files now,
so there's no need to check that they've been correctly transcribed
into SYSCALLS.TXT.

This is a work in progress. I've added TODOs to SYSCALLS.TXT explaining
what's left to do.

Change-Id: I3b86acfe7f84b4da1c802ee5a4ef13a2e83e7939
2013-04-03 10:08:09 -07:00
Elliott Hughes
162b4411fc Merge "Stop using unreasonable numbers of map entries." 2013-04-03 00:48:07 +00:00
Elliott Hughes
4ace92c62a Stop using unreasonable numbers of map entries.
Bug: 8460659
Change-Id: Ib0ee71e3cf61e122d0449c9d8a4e4670a7d7129a
2013-04-02 17:41:14 -07:00
Elliott Hughes
7a29f404e1 Merge "Update getaddrinfo to RFC6724" 2013-04-02 01:05:23 +00:00
Lorenzo Colitti
378b0e1ea2 Update getaddrinfo to RFC6724
Currently, our getaddrinfo implementation does not conform to
any IETF standard. It follows draft-ietf-6man-rfc3484-revise-01,
but that draft has expired. Update the policy table to RFC6724.

(cherry-pick of e919b116d35aa7deb24ddece69c491e24c3b0d6f.)

Bug: 8276725
Change-Id: I2d17122defd966ac6c2c13d04887fb110f2598a0
2013-04-01 18:04:25 -07:00
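For context, RFC 6724's destination-address selection is driven by a prefix/precedence/label policy table; a hedged C illustration of its shape follows (the struct name and rows are chosen for illustration, not copied from bionic). The full default table in RFC 6724 section 2.1 also covers 6to4, Teredo, ULA and site-local prefixes.

    #include <netinet/in.h>

    struct policy_entry {
      struct in6_addr prefix;
      int prefix_len;
      int precedence;   /* higher is preferred as a destination */
      int label;        /* used for source/destination label matching */
    };

    static const struct policy_entry kDefaultPolicy[] = {
      { IN6ADDR_LOOPBACK_INIT, 128, 50, 0 },  /* ::1/128 */
      { IN6ADDR_ANY_INIT,        0, 40, 1 },  /* ::/0    */
      /* ::ffff:0:0/96 (IPv4-mapped) gets precedence 35, label 4, and so on. */
    };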
Elliott Hughes
6bb17dfad3 Merge "Extra logging in pthread_create." 2013-03-29 23:47:24 +00:00
Elliott Hughes
cfa089df23 Extra logging in pthread_create.
pthread_create returns EAGAIN when it can't allocate a pthread_internal_t,
when it can't allocate a stack for the new thread, or when clone(2) fails
because there are too many threads. It's useful to be able to know why your
pthread_create just failed, so add some logging.

Bug: 8470684
Change-Id: I1bb4497d4f7528eacce0db35c2014771cba64569
2013-03-29 16:35:00 -07:00
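A caller-side usage note: pthread_create reports failure through its return value rather than errno, so the EAGAIN described above is examined like this (a hedged, self-contained example, not bionic code):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static void* worker(void* arg) { return arg; }

    int start_worker(pthread_t* t) {
      int rc = pthread_create(t, NULL, worker, NULL);
      if (rc != 0) {
        /* rc is the error code itself; EAGAIN means "out of thread resources". */
        fprintf(stderr, "pthread_create failed: %s\n", strerror(rc));
      }
      return rc;
    }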
Jean-Baptiste Queru
74b324ac09 Merge "Update processed linux/fs.h header file" 2013-03-28 15:49:04 +00:00
Ken Sumrall
ce636ca088 Update processed linux/fs.h header file
Need to get the definition of the FITRIM ioctl().  Also need
to update the blk_types.h header file as fs.h includes it.

Change-Id: I617622b620925942dc5aead9e49f8e17d17e5d74
2013-03-27 16:05:22 -07:00
Elliott Hughes
bd014c2e42 Merge "Remove some dead script code and fix a script comment." 2013-03-25 21:20:22 +00:00
Elliott Hughes
e6ddfc55c8 Remove some dead script code and fix a script comment.
Change-Id: I91ca466d1b6f8a39da50ce61acebc268d0daab3a
2013-03-25 14:09:52 -07:00
Elliott Hughes
e51d75a9ac Merge "Clarify the dlmalloc USAGE_ERROR." 2013-03-25 20:54:14 +00:00
Elliott Hughes
65a8eb606f Clarify the dlmalloc USAGE_ERROR.
Bug: 8468088
Change-Id: I1ff6d51ec17fc74ef56229aa76d2986cbd662874
2013-03-25 13:48:41 -07:00
Elliott Hughes
48f25cef45 Merge "Stop generating <sys/linux-syscalls.h>." 2013-03-25 17:13:26 +00:00
Elliott Hughes
a40751185f Merge "Unhide __set_errno for backwards compatibility." 2013-03-25 17:12:19 +00:00
Elliott Hughes
3198850ea1 Unhide __set_errno for backwards compatibility.
This was in <errno.h>, and people called it :-(

Change-Id: I038490be77eb9372b3f31305ec580fa7b09c983e
2013-03-22 19:12:52 -07:00
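The helper in question is tiny; a hedged sketch of its traditional shape (using a stand-in name so as not to claim this is bionic's exact definition):

    #include <errno.h>

    /* Syscall stubs call this with a positive error number and return its result. */
    int __set_errno_sketch(int error_number) {
      errno = error_number;
      return -1;
    }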
Elliott Hughes
1b91c6c11f Stop generating <sys/linux-syscalls.h>.
The <asm/unistd.h> files contain the canonical data, and
<sys/glibc-syscalls.h> contains the new glibc-compatible names,
and if you #include the standard <sys/syscall.h> you get
both sets of names.

Change-Id: I9919c080931c0ba1660f5e37c6a6265ea716d603
2013-03-22 18:56:24 -07:00
Elliott Hughes
c019345a3a Merge "Use the correct names for the __ARM_NR_* syscalls." 2013-03-22 21:50:49 +00:00
Elliott Hughes
cda62094ef Use the correct names for the __ARM_NR_* syscalls.
This lets us move all the ARM syscall stubs over to the kernel <asm/unistd.h>.
Our generated <sys/linux-syscalls.h> is now unused, but I'll remove that in a
later change.

Change-Id: Ie5ff2cc4abce1938576af7cbaef615a79c7f310d
2013-03-22 13:53:43 -07:00
Elliott Hughes
c37dd59956 Merge "Fix builds where _FORTIFY_SOURCE is off." 2013-03-22 18:01:50 +00:00
Elliott Hughes
890c8ed6ef Fix builds where _FORTIFY_SOURCE is off.
Also add a more intention-revealing guard so we don't have loads of
places checking whether our inlining macro is defined.

Change-Id: I168860cedcfc798b07a5145bc48a125700265e47
2013-03-22 10:58:55 -07:00
Elliott Hughes
34895c1bdf Merge "Replace unnecessary ARM uses of <sys/linux-syscalls.h> with <asm/unistd.h>." 2013-03-22 17:51:43 +00:00
Elliott Hughes
5c5f303e4c Merge "Replace unnecessary MIPS uses of <sys/linux-syscalls.h> with <asm/unistd.h>." 2013-03-22 16:30:56 +00:00
Elliott Hughes
e7cb795cb6 Merge "Replace unnecessary x86 uses of <sys/linux-syscalls.h> with <asm/unistd.h>." 2013-03-22 16:30:37 +00:00
Elliott Hughes
8794ece296 Replace unnecessary ARM uses of <sys/linux-syscalls.h> with <asm/unistd.h>.
For some reason, socketcalls.c was only being compiled for ARM, where
it makes no sense. For x86 we generate stubs for the socket functions
that use __NR_socketcall directly.

Change-Id: I84181e6183fae2314ae3ed862276eba82ad21e8e
2013-03-21 23:07:11 -07:00
Elliott Hughes
56d161bb62 Replace unnecessary MIPS uses of <sys/linux-syscalls.h> with <asm/unistd.h>.
Change-Id: I193a35f9790b82c83fd3b5672e24ac1b2034d0b0
2013-03-21 22:52:25 -07:00
Elliott Hughes
4cdde5f123 Replace unnecessary x86 uses of <sys/linux-syscalls.h> with <asm/unistd.h>.
Change-Id: I9d016ee8e8329cccf244d27c336d9524348af996
2013-03-21 22:48:18 -07:00
Elliott Hughes
3323628d63 Merge "The SYS_ constants should cover all __NR_ values." 2013-03-22 05:39:06 +00:00
Elliott Hughes
5c2772f59d The SYS_ constants should cover all __NR_ values.
<sys/linux-syscalls.h> only contains constants for the syscalls
we're generating stubs for. We want all the syscalls available
on the architecture in question.

Keep using <sys/linux-syscalls.h> on ARM for now because the
__NR_ARM_set_tls and __NR_ARM_cacheflush values aren't in <asm/unistd.h>.

Change-Id: I66683950d87d9b18d6107d0acc0ed238a4496f44
2013-03-21 22:26:20 -07:00
Elliott Hughes
babb72dc29 Merge "Fix pthread_setname_np's behavior on invalid pthread_ts." 2013-03-22 05:11:35 +00:00
Elliott Hughes
a41ba2f0bf Fix pthread_setname_np's behavior on invalid pthread_ts.
Change-Id: I0a154beaab4d164ac812f2564d12e4d79b80a8e8
2013-03-21 20:02:35 -07:00
Elliott Hughes
26c815c489 Merge "Drop magic number in strrchr and strchr" 2013-03-22 02:48:32 +00:00
Elliott Hughes
612333f671 Merge "Don't #define SYS_ constants unless they make sense for the current architecture." 2013-03-22 02:46:22 +00:00
Elliott Hughes
9724ce3a09 Don't #define SYS_ constants unless they make sense for the current architecture.
Fixes the MIPS and x86 builds. strace tests whether syscalls
are supported using #ifdef of the appropriate SYS_ constant.

Change-Id: I90be118dc42abfdaf5b0f9b1e676e8601f55106e
2013-03-21 19:44:36 -07:00
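The generated header can express this by guarding each alias on the corresponding kernel constant, so probes like strace's "#ifdef SYS_xxx" keep working; an illustrative fragment (not the actual generated file):

    #include <asm/unistd.h>

    #if defined(__NR_gettid)
    #define SYS_gettid __NR_gettid
    #endif
    #if defined(__NR_inotify_init1)
    #define SYS_inotify_init1 __NR_inotify_init1
    #endif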
Elliott Hughes
4453c51c82 Merge "Drop unnecessary execution permission for .cpp/.c/.h" 2013-03-22 02:30:28 +00:00
Kito Cheng
8f7120bbac Drop unnecessary execution permission for .cpp/.c/.h
Change-Id: I9ac2b9d8f6bdb4fab8962210c5ec8f9c3e8c0ebf
2013-03-22 10:28:15 +08:00
Kito Cheng
a6cc67c9b5 Drop magic number in strrchr and strchr
Change-Id: Ic7391be8575eaaac76914dab62bc41c9773d703d
2013-03-22 10:18:09 +08:00
Elliott Hughes
800ad8249a Merge "Provide glibc-compatible SYS_* aliases for the __NR_* constants." 2013-03-22 01:41:57 +00:00
Elliott Hughes
8ecf225827 Provide glibc-compatible SYS_* aliases for the __NR_* constants.
This helps us remove another external/strace bionic hack.

Change-Id: I3e82c0d2fd27e479be98f096e05b666fd16f8eb3
2013-03-21 18:06:55 -07:00
Elliott Hughes
6eb978c9bf Merge "Expose wait4 as wait4 rather than __wait4." 2013-03-21 23:14:48 +00:00
Elliott Hughes
17a8b0db63 Expose wait4 as wait4 rather than __wait4.
This helps strace(1) compile with one fewer hack.

Change-Id: I5296d0cfec5546709cda990abd705ad33d7c4626
2013-03-21 16:14:06 -07:00
Christopher Ferris
86c3184972 Merge "Create arch specific versions of strcmp." 2013-03-20 22:14:48 +00:00
Christopher Ferris
31dea25b8b Create arch specific versions of strcmp.
This uses the new strcmp.a15.S code as the basis for new versions
of strcmp.S.

The cortex-a15 code is the performance optimized version of strcmp.a15.S
taken with only the addition of a few pld instructions.
The cortex-a9 code is the same as the cortex-a15 code except that the
unaligned strcmp code was taken from the original strcmp.S.
The krait code is the same as the cortex-a15 code except that one path
in the unaligned strcmp code was taken from the original strcmp.S code
(the 2 byte overlap case).
The generic code is the original unmodified strcmp.S from the bionic
subdirectory.

All three new versions underwent these test cases:

Strings the same, all same size:
- Both pointers double word aligned.
- One pointer double word aligned, one pointer word aligned.
- Both pointers word aligned.
- One pointer double word aligned, one pointer 1 off a word alignment.
- One pointer double word aligned, one pointer 2 off a word alignment.
- One pointer double word aligned, one pointer 3 off a word alignment.
- One pointer word aligned, one pointer 1 off a word alignment.
- One pointer word aligned, one pointer 2 off a word alignment.
- One pointer word aligned, one pointer 3 off a word alignment.
For all cases where it made sense, the two pointers were also tested
swapped.

Different strings, all same size:
- Single difference at double word boundary.
- Single difference at word boundary.
- Single difference at 1 off a word alignment.
- Single difference at 2 off a word alignment.
- Single difference at 3 off a word alignment.

Different sized strings, strings the same until the end:
- Shorter string ends on a double word boundary.
- Shorter string ends on word boundary.
- Shorter string ends at 1 off a word boundary.
- Shorter string ends at 2 off a word boundary.
- Shorter string ends at 3 off a word boundary.

For all different cases, run them through the same pointer alignment
cases when the strings are the same size.
For all cases the two pointers were also tested swapped.

Bug: 8005082

Merge from internal master.

(cherry-picked from commit a9a5870d16)

Change-Id: I4c2b98f8a50804fb98ab67f75e9d660f1315a144
2013-03-20 14:33:54 -07:00
Elliott Hughes
adeec09629 Merge "Hide various symbols that shouldn't be exposed." 2013-03-15 23:52:09 +00:00
Elliott Hughes
ce532721aa Hide various symbols that shouldn't be exposed.
A mangled symbol in libc.so is a symbol that shouldn't be exported
by libc.so.

Change-Id: Id92d1e1968b3d11d111a5d9ef692adb1ac7694a1
2013-03-15 16:31:09 -07:00
Elliott Hughes
c14166477e Merge "Clean up internal libc logging." 2013-03-15 23:21:36 +00:00
Elliott Hughes
8f2a5a0b40 Clean up internal libc logging.
We only need one logging API, and I prefer the one that does no
allocation and is thus safe to use in any context.

Also use O_CLOEXEC when opening the /dev/log files.

Move everything logging-related into one header file.

Change-Id: Ic1e3ea8e9b910dc29df351bff6c0aa4db26fbb58
2013-03-15 16:12:58 -07:00
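The O_CLOEXEC detail looks like this in isolation (the path and function name are illustrative): the flag keeps the log descriptor from leaking into exec'd children.

    #include <fcntl.h>

    static int open_log_device(const char* path) {
      return open(path, O_WRONLY | O_CLOEXEC);
    }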
Elliott Hughes
e23ed8c644 Merge "Remove useless workaround for HTC RIL bugs." 2013-03-15 18:26:49 +00:00
Elliott Hughes
2eb44c5aa4 Remove useless workaround for HTC RIL bugs.
HTC's RIL uses the liblog logging, not ours.

Change-Id: I56f9304e833ccb329df4ee55042012d7ad5f73ed
2013-03-15 11:21:15 -07:00
Elliott Hughes
4d696eb49d Merge "Fix a bug in ZoneCompactor that meant the zonetab offset was wrong." 2013-03-15 00:30:48 +00:00
Elliott Hughes
af7f2f2fcf Fix a bug in ZoneCompactor that meant the zonetab offset was wrong.
Bug: 8391426
Change-Id: Ia4a8889b5a613aa96bb3fb5d89a921c913ff7626
2013-03-14 17:10:24 -07:00
Christopher Ferris
d30b9f0912 Merge "Remove unused arm defines." 2013-03-14 23:23:42 +00:00
Christopher Ferris
306a353825 Remove unused arm defines.
The defines HAVE_32_BYTE_CACHE_LINES and ARCH_ARM_USE_NON_NEON_MEMCPY
are not used by any code. The previous memcpy code that used these
has been split into different architecture versions to avoid the need
for them.

Bug: 8005082

Merge from internal master.

(cherry-picked from commit 6e1a5cf31b)

Change-Id: Ib18fc3f4131b21cdbd19b9dde7697ac25d066fcf
2013-03-14 15:32:18 -07:00
Elliott Hughes
f861bc5cc3 Merge "Don't search off the end of the index for bad Olson ids." 2013-03-14 22:05:44 +00:00
Elliott Hughes
e0175ca7e4 Don't search off the end of the index for bad Olson ids.
In the old code, the index was a file to itself, so it made sense to
read until you hit the end of the file. In the new code, the index is
followed by hundreds of KiB of data, so we need to just search the
index.

Bug: 8368791
Change-Id: Icf5f8b5516cf3a93679fa849c9f6cd1cb100e0f1
2013-03-14 14:38:08 -07:00
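A hedged sketch of the bounded search the fix implies (the entry layout and names are illustrative, not bionic's actual tzdata reader):

    #include <string.h>

    struct index_entry {
      char id[40];   /* Olson id, e.g. "America/New_York" */
      int  start;    /* offset of this zone's data in the combined file */
      int  length;
    };

    static const struct index_entry* find_zone(const struct index_entry* index,
                                               size_t entry_count,
                                               const char* olson_id) {
      /* Bounded by the size of the index, not by the size of the file. */
      for (size_t i = 0; i < entry_count; ++i) {
        if (strcmp(index[i].id, olson_id) == 0) {
          return &index[i];
        }
      }
      return NULL;  /* bad Olson id: fail instead of reading zone data as index */
    }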
Elliott Hughes
ec706c24ac Merge "Use the kernel's MAX_ERRNO in the syscall stubs." 2013-03-13 00:44:33 +00:00
Elliott Hughes
9aceab5015 Use the kernel's MAX_ERRNO in the syscall stubs.
Bug: http://code.google.com/p/android/issues/detail?id=53104
Change-Id: Iaabf7025b153e96dc5eca231a33a32d4cb7d8116
2013-03-12 17:43:58 -07:00
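In C terms, the stub's return-value check works like the sketch below; MAX_ERRNO is the kernel's value (4095), and using it keeps large successful results (such as high mmap addresses) from being misread as errors. The helper name is hypothetical; the real stubs do this in assembly.

    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline long check_syscall_result(long ret) {
      if ((unsigned long)ret >= (unsigned long)(-MAX_ERRNO)) {
        /* ret is in [-MAX_ERRNO, -1]: a genuine error from the kernel. */
        errno = (int)-ret;
        return -1;
      }
      return ret;  /* success, even if the value looks "negative" as a signed long */
    }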
Christopher Ferris
980508b0ea Merge "Break bionic implementations into arch versions." 2013-03-12 22:24:09 +00:00
Christopher Ferris
04954a43b3 Break bionic implementations into arch versions.
Move arch specific code for arm, mips, x86 into separate
makefiles.
In addition, add different arm cpu versions of memcpy/memset.

Bug: 8005082

Merge from internal master (acdde8c1cf).

Change-Id: I04f3d0715104fab618e1abf7cf8f7eec9bec79df
2013-03-12 14:06:08 -07:00
Elliott Hughes
94a34010c1 Merge "Support large errno values on ARM." 2013-03-12 19:08:36 +00:00
Elliott Hughes
cb2069bf69 Support large errno values on ARM.
Bug: http://code.google.com/p/android/issues/detail?id=53104
Change-Id: Ic6c40be2dc50f0644a3d8b09ceae59c38f2d5b53
2013-03-12 11:55:45 -07:00
Elliott Hughes
f21aa3b61e Merge "Use more types than just 'unsigned' in the linker." 2013-03-12 18:54:27 +00:00
Elliott Hughes
ca0c11bd82 Use more types than just 'unsigned' in the linker.
Still chipping away at the situation where every variable in the
linker was of type 'unsigned'. This patch switches counts over to
being size_t and adds an explicit type for init/fini function pointers
and arrays of function pointers.

Also improve logging from CallArray.

Also remove trailing "\n"s from log messages.

Change-Id: Ie036d2622caac50f4d29f0570888bb527661d77e
2013-03-12 11:26:56 -07:00
Elliott Hughes
6b4c77f854 Merge "Use Elf32_Addr instead of unsigned in linker" 2013-03-12 17:24:04 +00:00
Kito Cheng
fa8c05dc00 Use Elf32_Addr instead of unsigned in linker
Change-Id: I52dcbb4b0ff0a4052e0ad7a9bbeb2df65c9d2f66
2013-03-12 10:19:14 -07:00
Elliott Hughes
53630c0e5b Merge "Cache the most-recently used timezone for mktime_tz and localtime_tz." 2013-03-12 01:28:36 +00:00
Elliott Hughes
4a05bef4c0 Cache the most-recently used timezone for mktime_tz and localtime_tz.
Normally, the C library implicitly caches your timezone by virtue
of the fact that the prehistoric API assumes a single timezone for
the entire process.

The unfortunate mktime_tz and localtime_tz extensions work around
this, but represent timezones as strings to their callers, so code
that makes heavy use of these needs a cache to be able to perform
acceptably until it can hopefully one day be rewritten to use
java.util.Calendar or icu4c.

Bug: 8270865
Change-Id: I92e3964e86dc33ceac925f819cc5e26ff4203f50
2013-03-11 17:17:02 -07:00
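A hedged sketch of a single-entry, most-recently-used cache keyed by the timezone name (the names here are hypothetical, and the real code also has locking and actually parses the zone rules):

    #include <string.h>

    struct tz_cache {
      char name[64];
      /* ...parsed timezone rules would be stored here... */
      int valid;
    };

    static struct tz_cache g_cached_tz;

    /* Returns 1 on a cache hit, 0 after (re)loading the named zone. */
    static int tz_cache_lookup(const char* tz_name) {
      if (g_cached_tz.valid && strcmp(g_cached_tz.name, tz_name) == 0) {
        return 1;  /* hit: reuse the already-parsed rules */
      }
      /* miss: parse the zone here, then remember which one we loaded */
      strncpy(g_cached_tz.name, tz_name, sizeof(g_cached_tz.name) - 1);
      g_cached_tz.name[sizeof(g_cached_tz.name) - 1] = '\0';
      g_cached_tz.valid = 1;
      return 0;
    }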
Elliott Hughes
4b032ee9b9 Merge "Let bionic use the correct definition of ssize_t." 2013-03-11 17:55:27 +00:00
Elliott Hughes
62c5cd0781 Merge "Upgrade to tzdata2013b." 2013-03-11 16:42:57 +00:00
Elliott Hughes
cdb27f239b Upgrade to tzdata2013b.
From the release notes:

  Changes affecting current and future time stamps:

    Haiti uses US daylight-saving rules this year, and presumably future years.
    This changes time stamps starting today.  (Thanks to Steffen Thorsen.)

    Paraguay will end DST on March 24 this year.
    (Thanks to Steffen Thorsen.)  For now, assume it's just this year.

    Morocco does not observe DST during Ramadan;
    try to predict Ramadan in Morocco as best we can.
    (Thanks to Erik Homoet for the heads-up.)

Change-Id: I98d5290ea5a1d9fb1eeddab1c9e72135dc9e4bd1
2013-03-11 09:41:45 -07:00
Elliott Hughes
b6e22482d4 Let bionic use the correct definition of ssize_t.
Bug: 8253769
Change-Id: I50c7cc20828fc089b83580e039ce9153a6c5a8cc
2013-03-08 15:28:52 -08:00
Elliott Hughes
809eed1ded Merge "Regenerate NOTICE files." 2013-03-07 19:57:17 +00:00
Elliott Hughes
0493a6f7be Regenerate NOTICE files.
Also clean up some obsolete cruft.

Change-Id: Iec3b36f6607f7a08b72de99294ed5b6cd910dd5f
2013-03-07 11:51:10 -08:00
Elliott Hughes
c9f8081425 Merge "Upgrade to current NetBSD popen/pclose." 2013-03-07 00:54:22 +00:00
Elliott Hughes
6b3f49a537 Upgrade to current NetBSD popen/pclose.
This gets us back to using vfork now our ARM vfork assembler stub is
fixed, and adds the missing thread safety for the 'pidlist'.

Bug: 5335385
Change-Id: Ib08bfa65b2cb9fa695717aae629ea14816bf988d
2013-03-06 16:20:55 -08:00
Brian Carlstrom
33031fed84 Merge "Fix typo of DT_NEEDED for DT_NULL" 2013-03-06 23:55:56 +00:00
Brian Carlstrom
2d4b9b7cff Fix typo of DT_NEEDED for DT_NULL
(cherry-pick of 138b205ea9efc117fe522c2d7191378023a6e2cd)

Change-Id: Ia895cb3018df55554627f1f61dcdfdada4a961ce
2013-03-06 15:50:30 -08:00
Elliott Hughes
db794197cc Merge "Fix debug malloc." 2013-03-06 23:13:01 +00:00
Elliott Hughes
642331b5dd Fix debug malloc.
This was broken by the change to use AT_RANDOM for the stack guards.

Bug: 7959813
Bug: 8330764
Change-Id: I791900092b72a9a900f16585237fa7ad82aaed9f
2013-03-06 15:03:53 -08:00
Elliott Hughes
0b4a85bf1e Merge "Fix TIMING/STATS/COUNT_PAGES dynamic linker build" 2013-03-06 17:25:51 +00:00
Kito Cheng
5e2492eb89 Fix TIMING/STATS/COUNT_PAGES dynamic linker build
Change-Id: I6432ac378816da253b83d1c7fb1d3fb64647b89e
2013-03-06 23:58:48 +08:00
Brian Carlstrom
20958207d5 Merge "Fix MIPS linker build" 2013-03-06 09:06:16 +00:00
Brian Carlstrom
43cc7f795b Fix MIPS linker build
(cherry-picked from 8c7d8c2057e303985f78eab96da747ddaa013c78)

Change-Id: Idcf62ab95f8fccbc2d7c3e771a4cfbe768a1555e
2013-03-06 01:05:08 -08:00
Elliott Hughes
c41dcad040 Merge "More linker cleanup." 2013-03-06 06:26:56 +00:00
Elliott Hughes
650be4e584 More linker cleanup.
Change-Id: I9fb3c7c0d4b4ffef0eeaf092d4e30ffe63a08671
2013-03-05 22:24:34 -08:00
Brian Carlstrom
036f909720 Merge "Minor linker cleanup, primarily to use Elf32_Dyn" 2013-03-06 01:01:23 +00:00
Brian Carlstrom
d4ee82dfa3 Minor linker cleanup, primarily to use Elf32_Dyn
Change-Id: Ifa9408e9859c6f79444715bed4808b7c13fdced5
2013-03-05 15:27:21 -08:00
Elliott Hughes
be21fe7a59 Merge "Upgrade to tzdata2013a." 2013-03-05 22:19:14 +00:00
Elliott Hughes
69af6e61e1 Upgrade to tzdata2013a.
From the release notes:

    Chile's 2013 rules, and we guess rules for 2014 and later, will be
    the same as 2012, namely Apr Sun>=23 03:00 UTC to Sep Sun>=2 04:00 UTC.
    (Thanks to Steffen Thorsen and Robert Elz.)

    New Zones Asia/Khandyga, Asia/Ust-Nera, Europe/Busingen.
    (Thanks to Tobias Conradi and Arthur David Olson.)

Change-Id: I351e04b3348420ad7df7c648963c235b534033d2
2013-03-05 14:00:38 -08:00
Ben Cheng
133d97e4b0 Merge "Add stack unwinding directives to memcpy." 2013-03-05 21:04:38 +00:00
Elliott Hughes
cf9a9604ad Merge "Switch to upstream-freebsd for the unmolested wchar code." 2013-03-05 20:13:33 +00:00
Elliott Hughes
eb93ebffba Switch to upstream-freebsd for the unmolested wchar code.
Change-Id: I87b4d76ff8da04109ae53638eec4f11629798960
2013-03-01 18:35:56 -08:00
Elliott Hughes
d392e044c7 Merge "Move realpath.c to upstream-freebsd." 2013-03-02 01:22:04 +00:00
Elliott Hughes
f0777843c0 Move realpath.c to upstream-freebsd.
This is actually a slightly newer upstream version than the one I
originally pulled. Hopefully now it's in upstream-freebsd it will
be easier to track upstream, though I still need to sit down and
write the necessary scripts at some point.

Bug: 5110679
Change-Id: I87e563f0f95aa8e68b45578e2a8f448bbf827a33
2013-03-01 17:11:39 -08:00
Ben Cheng
14283004f5 Add stack unwinding directives to memcpy.
Also include some Android specific header files.

Change-Id: Idbcbd43458ba945ca8c61bfbc04ea15fc0ae4e00
2013-03-01 14:56:04 -08:00
Ben Cheng
c5c6cb3f13 am 66273ac2: Merge "Adding strcmp tuned for Cortex-A15."
* commit '66273ac2b6b8c2042350204575bd290d96dea681':
  Adding strcmp tuned for Cortex-A15.
2013-03-01 10:24:58 -08:00
Ben Cheng
8f149da08d am b3b1ab62: Merge "Adding memcpy tuned for Cortex-A15."
* commit 'b3b1ab6213df15f99c9af3088cfe733428816fd3':
  Adding memcpy tuned for Cortex-A15.
2013-03-01 10:24:58 -08:00
Nick Kralevich
305a999589 am 8fa924e5: Merge "unistd.h: don\'t include sys/capability.h"
* commit '8fa924e5dff4588cca8586e0e727b9a544db7083':
  unistd.h: don't include sys/capability.h
2013-03-01 10:24:57 -08:00
Ben Cheng
66273ac2b6 Merge "Adding strcmp tuned for Cortex-A15." 2013-03-01 18:18:34 +00:00
Ben Cheng
b3b1ab6213 Merge "Adding memcpy tuned for Cortex-A15." 2013-03-01 18:18:05 +00:00
Nick Kralevich
8fa924e5df Merge "unistd.h: don't include sys/capability.h" 2013-03-01 18:14:47 +00:00
Nick Kralevich
6524d3cad7 unistd.h: don't include sys/capability.h
Don't pull in unnecessary header files. AFAIK, I've fixed all
the code which didn't include the correct header files.

Change-Id: If0b7bba74e77cb24a0cf9ce8968aa07400855e58
2013-03-01 10:10:55 -08:00
Greta Yorsh
eb149e954e Adding strcmp tuned for Cortex-A15.
The attached patch provides a new implementation of strcmp for ARM,
using LDRD instead of LDR whenever possible.

For older architectures that do not support LDRD, this implementation
uses the same algorithm as before.

Testing and benchmarking:
* Validation: successfully passes a test that compares different strings
of length 1-128 and offsets 0-8 from a word boundary. Checked on
qemu/A15/A9, ARM/Thumb mode, Big/Little Endian.
* Integration with gcc: no regression on qemu for arm-none-eabi --with-cpu
a15/a9 --with-mode arm/thumb.

Change-Id: I9e230e1b99dbdc9119b69ee858a89038c516a4ea
Signed-off-by: Vassilis Laganakos <vasileios.laganakos@arm.com>
2013-03-01 10:41:01 +00:00
Greta Yorsh
5b349fc22e Adding memcpy tuned for Cortex-A15.
The strategy for large block sizes is LDRD and STRD with offset addressing,
where the main loop copies 64 bytes in every iteration, (i.e., 8 calls to
LDRD and STRD pairs), interleaving load and stores (i.e., the pairs of LDRD
and STRD of the same data are consecutive instructions), and the writeback
of an updated address is a separate instruction, which allows us to write
back the accumulated update once per iteration.

This strategy is implemented in memcpy.S. In some configurations, a plain
version of memcpy (included from memcpy-stub.c) is used instead of the
optimized one.

Validation:
* Correctness: checked memcpy using a test harness for block sizes
ranging from 1 to 128, and source and destination buffer alignments
ranging over { 0,1,2,3,4,8,12 } bytes each.
* Performance: benchmarking on Cortex-A15 FPGA indicates that this strategy
is better for A15 than the strategy used by glibc and even slightly better
than using NEON. Benchmarking on Cortex-A9 bare metal and Linux shows
that the proposed strategy is reasonable: not as fast as the version of
memcpy from glibc (which is the best open source strategy for A9), but
comparable with csl and bionic.
* Integration with GCC: no regression for arm-none-eabi --with-cpu
cortex-a15 and cortex-a9.

Change-Id: Ied56354d8992c62ae3e02d582a2bd55585d814b9
Signed-off-by: Vassilis Laganakos <vasileios.laganakos@arm.com>
2013-03-01 10:40:50 +00:00
Elliott Hughes
7fe8229b49 am 9c1912c4: Merge "Avoid changing the C++ ABI with ssize_t."
* commit '9c1912c4bf5eb0edc07bfd333226c4e0b7a629eb':
  Avoid changing the C++ ABI with ssize_t.
2013-02-28 11:43:11 -08:00
Elliott Hughes
9c1912c4bf Merge "Avoid changing the C++ ABI with ssize_t." 2013-02-28 19:28:53 +00:00
Elliott Hughes
e255642dc1 Avoid changing the C++ ABI with ssize_t.
Bug: 8253769
Change-Id: Ia325003ed6e59da553e2bdde7c43515bc191b8ba
2013-02-28 10:51:31 -08:00
Nick Kralevich
58b997c812 am f5f906c1: Merge "libc: create sys/capability.h"
* commit 'f5f906c184677b8295523231cfeead9ed94661ad':
  libc: create sys/capability.h
2013-02-26 13:50:03 -08:00
Nick Kralevich
f5f906c184 Merge "libc: create sys/capability.h" 2013-02-26 21:38:11 +00:00
Nick Kralevich
7c0dd555c0 libc: create sys/capability.h
Per "man capset", sys/capability.h is the appropriate header file
for the capget / capset definition, not unistd.h. Fixed.

As a short term hack, continue to include sys/capability.h in
unistd.h, until we can fix all the code which uses capget / capset.

Change-Id: I6e7cf55955d761ca785a14c5e4b7a44125d8fc15
2013-02-26 13:27:15 -08:00
Elliott Hughes
406968b244 am c0e9ddd0: Merge "Reimplement scandir(3)."
* commit 'c0e9ddd002f6084c29c26236d741d64d01713c15':
  Reimplement scandir(3).
2013-02-25 14:54:54 -08:00
Elliott Hughes
c0e9ddd002 Merge "Reimplement scandir(3)." 2013-02-25 22:43:28 +00:00
Elliott Hughes
701bec2af3 Reimplement scandir(3).
The old scandir implementation didn't take into account the varying
size of directory entries, and didn't correctly clean up on its
error exits.

Bug: 7339844
Change-Id: Ib40e3564709752241a3119a496cbb2192e3f9abe
2013-02-25 13:14:31 -08:00
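A hedged sketch of the two points called out above: copy each entry at its real (variable) size, and free everything already accumulated on the error paths. It omits the filter/compar arguments of the real scandir and is not bionic's implementation.

    #include <dirent.h>
    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    int scandir_sketch(const char* path, struct dirent*** out) {
      DIR* d = opendir(path);
      if (d == NULL) return -1;
      struct dirent** names = NULL;
      size_t count = 0, capacity = 0;
      struct dirent* e;
      while ((e = readdir(d)) != NULL) {
        if (count == capacity) {
          size_t new_capacity = capacity ? capacity * 2 : 16;
          struct dirent** bigger = realloc(names, new_capacity * sizeof(*names));
          if (bigger == NULL) goto fail;
          names = bigger;
          capacity = new_capacity;
        }
        /* Directory entries are variable length: only copy what this one needs. */
        size_t size = offsetof(struct dirent, d_name) + strlen(e->d_name) + 1;
        struct dirent* copy = malloc(size);
        if (copy == NULL) goto fail;
        memcpy(copy, e, size);
        names[count++] = copy;
      }
      closedir(d);
      *out = names;
      return (int)count;

    fail:
      /* Clean up everything allocated so far before reporting the error. */
      for (size_t i = 0; i < count; ++i) free(names[i]);
      free(names);
      closedir(d);
      return -1;
    }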
Elliott Hughes
7b8bf68133 am f6bb5bf4: Merge "Add the glibc-compatible names to <sys/endian.h>."
* commit 'f6bb5bf498810d0622f66020059c87b96738c35b':
  Add the glibc-compatible names to <sys/endian.h>.
2013-02-22 14:02:44 -08:00
Elliott Hughes
f6bb5bf498 Merge "Add the glibc-compatible names to <sys/endian.h>." 2013-02-22 21:46:59 +00:00
Elliott Hughes
440bc83d92 am 7b2c6385: Merge "Fix <memory.h> to be a synonym for <string.h> like in glibc."
* commit '7b2c6385effbb6d6e98bfe29cc6c144211128d9e':
  Fix <memory.h> to be a synonym for <string.h> like in glibc.
2013-02-22 12:10:15 -08:00
Nick Kralevich
cadc858329 am bc0e7ee1: Merge "libc: add sys/signal.h for compatibility"
* commit 'bc0e7ee18157c2ffe65644514555689f4259ccfe':
  libc: add sys/signal.h for compatibility
2013-02-22 11:53:05 -08:00
Elliott Hughes
7b2c6385ef Merge "Fix <memory.h> to be a synonym for <string.h> like in glibc." 2013-02-22 19:49:39 +00:00
Nick Kralevich
bc0e7ee181 Merge "libc: add sys/signal.h for compatibility" 2013-02-22 19:33:53 +00:00
Elliott Hughes
6f1b7a6407 Fix <memory.h> to be a synonym for <string.h> like in glibc.
Change-Id: If23589c5d85dffd28788e04b010303620fa178ca
2013-02-22 11:11:48 -08:00
Elliott Hughes
cf820d7e96 Add the glibc-compatible names to <sys/endian.h>.
Also remove declarations for functions that don't exist; these
are all macros.

Bug: http://code.google.com/p/android/issues/detail?id=41769
Change-Id: Ia3774ab2ff7d3c535f83774eac61068f9b11e194
2013-02-22 11:04:27 -08:00
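An illustrative fragment of what such compatibility aliases look like (not necessarily the actual header): the BSD-derived header spells the conversions betoh32/letoh32, glibc spells them be32toh/le32toh, and the new names can simply map to the old ones.

    #define be16toh(x) betoh16(x)
    #define be32toh(x) betoh32(x)
    #define le16toh(x) letoh16(x)
    #define le32toh(x) letoh32(x)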
Nick Kralevich
b22a684990 libc: add sys/signal.h for compatibility
Some applications look for sys/signal.h instead of signal.h.
Work around those apps.

Change-Id: I76ac7744ebc56d196b5f0cb9ed381d32817436b9
2013-02-22 10:38:28 -08:00
Elliott Hughes
2a5b57db8c am 580a7073: Merge "Stop advertising rindex(3), which is both deprecated and unimplemented."
* commit '580a707376d81bfcb39919f0a1203b39a39dbd8a':
  Stop advertising rindex(3), which is both deprecated and unimplemented.
2013-02-21 17:55:28 -08:00
Elliott Hughes
580a707376 Merge "Stop advertising rindex(3), which is both deprecated and unimplemented." 2013-02-22 01:45:02 +00:00
Elliott Hughes
538f6fc202 Stop advertising rindex(3), which is both deprecated and unimplemented.
Change-Id: I3c775d9974e49c3f76a53e46e022659657b89034
2013-02-21 17:39:06 -08:00
Nick Kralevich
398f46dd92 am a0259b42: Merge "libc: remove bcmp prototype"
* commit 'a0259b42eba08e6d71a274fa3f770afccbb93107':
  libc: remove bcmp prototype
2013-02-21 17:39:03 -08:00
Nick Kralevich
a0259b42eb Merge "libc: remove bcmp prototype" 2013-02-22 01:22:35 +00:00
Nick Kralevich
11ebbc8437 libc: remove bcmp prototype
AFAIK, bionic only ever provided an implementation of bcmp
for x86, and even then, the code was never actually compiled.
Remove the prototype.

bcmp() has been obsoleted and replaced by memcmp()

Change-Id: I549d02ab6a9241a9acbbbfade0d98a9a02c2eaee
2013-02-21 17:17:09 -08:00
Elliott Hughes
719d46f8ac am a9ff09d1: Merge "Fix raise(3) so it works in signal handlers."
* commit 'a9ff09d1fc22292adc12cf99d4d44448d619b3cf':
  Fix raise(3) so it works in signal handlers.
2013-02-21 14:17:47 -08:00
Elliott Hughes
a9ff09d1fc Merge "Fix raise(3) so it works in signal handlers." 2013-02-21 20:05:35 +00:00
Elliott Hughes
fae89fc404 Fix raise(3) so it works in signal handlers.
We could special-case raise(3) in non-threaded programs, but the more
conservative course is to make pthread_kill(3) work in signal handlers
at the cost of a race shared by other C libraries.

Change-Id: I59fb23d03bdabf403435e731704b33acdf3e0234
2013-02-21 11:22:23 -08:00
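The relationship being relied on fits in one line (a sketch, not bionic's source): in a threaded program, raise(sig) behaves like pthread_kill(pthread_self(), sig), so making pthread_kill safe in a signal handler makes raise safe too.

    #include <pthread.h>
    #include <signal.h>

    int raise_sketch(int sig) {
      return pthread_kill(pthread_self(), sig);
    }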
Jean-Baptiste Queru
818b1423d2 Fix mako builds. Do not merge.
Revert "Regenerate msm_ion.h."

This reverts commit 3fac8f7f49.
2013-02-20 12:47:58 -08:00
Elliott Hughes
ccd403161c Merge "use architecture-specific ssize_t definition" 2013-02-19 22:13:53 +00:00
Thorsten Glaser
c641cafbc3 use architecture-specific ssize_t definition
After change 32822 was rejected, this is the more lightweight
version of the fix: libc/include/sys/types.h already - via
libc/kernel/common/linux/posix_types.h - includes a definition
of __kernel_ssize_t from libc/kernel/arch-*/asm/posix_types.h,
which is architecture-specific and toolchain-agnostic, and also
gets rid of the gcc -Wformat warning (which it issues correctly,
since this i̲s̲ indeed a bug in bionic).

Change-Id: Ie4503ab16628bc25815a836d07556f665e9795c7
2013-02-19 14:12:55 -08:00
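The shape of the fix, reduced to a hedged two-line sketch (the typedef name is a stand-in; bionic's actual change lives in its own sys/types.h):

    #include <linux/posix_types.h>

    /* __kernel_ssize_t comes from the architecture's asm/posix_types.h. */
    typedef __kernel_ssize_t my_ssize_t;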
Elliott Hughes
593abb7b59 Merge "stdlib: atexit: include <sys/cdefs.h>" 2013-02-19 21:58:18 +00:00
Elliott Hughes
eeecff7293 Merge "Fix pthreads functions that should return ESRCH." 2013-02-19 21:37:55 +00:00
Elliott Hughes
9d23e04c43 Fix pthreads functions that should return ESRCH.
imgtec pointed out that pthread_kill(3) was broken, but most of the
other functions that ought to return ESRCH for invalid/exited threads
were equally broken.

Change-Id: I96347f6195549aee0c72dc39063e6c5d06d2e01f
2013-02-19 12:21:41 -08:00
Elliott Hughes
b5862d4d8a Merge "Update linker README." 2013-02-19 19:14:26 +00:00
Elliott Hughes
aa772a33ba Update linker README.
Change-Id: Icaa353e9cf1848c86e7445f4ad590bdab44f7941
2013-02-19 11:13:44 -08:00
Chirayu Desai
61ba9b526b stdlib: atexit: include <sys/cdefs.h>
Change-Id: Ib9eb167710a021e0a2b5c77a06a9338cdc748e6d
2013-02-16 21:23:27 +05:30
Elliott Hughes
7f67f78ad5 Merge "dalvik is big enough and ugly enough to handle System.arraycopy itself." 2013-02-16 00:40:21 +00:00
Elliott Hughes
081318e355 dalvik is big enough and ugly enough to handle System.arraycopy itself.
Change-Id: I4b54a15ea101c0c6bab06cfb11e4178f5a57fc05
2013-02-15 14:27:52 -08:00
Elliott Hughes
39804dcde6 Merge "Fix the pthread_setname_np test." 2013-02-15 21:55:22 +00:00
Elliott Hughes
40eabe24e4 Fix the pthread_setname_np test.
Fix the pthread_setname_np test to take into account that emulator kernels are
so old that they don't support setting the name of other threads.

The CLONE_DETACHED flag has been obsolete since the 2.5 kernels.

Rename kernel_id to tid.

Fix the signature of __pthread_clone.

Clean up the clone and pthread_setname_np implementations slightly.

Change-Id: I16c2ff8845b67530544bbda9aa6618058603066d
2013-02-15 12:08:59 -08:00
Elliott Hughes
3e3b239d2b Merge "Fix the stack protector death test." 2013-02-15 02:10:29 +00:00
Elliott Hughes
7fd803cdfa Fix the stack protector death test.
Now __stack_chk_fail calls abort(3) directly, we terminate with
SIGSEGV rather than SIGABRT. (Because of the workaround for the
debuggerd lossage in the abort(3) implementation, which was the
motivation for switching __stack_chk_fail over to abort(3).)

Also clarify the comment on the weird pthread death test, so it
doesn't get copied and pasted onto real death tests.

Change-Id: Ie832eaded61359c99e7a10db65e28f35e8f63eed
2013-02-14 16:35:58 -08:00
Elliott Hughes
0a2cb81597 Merge "Simplify __stack_chk_fail, and fix it so we get debuggerd stack traces." 2013-02-14 23:50:13 +00:00
Nick Kralevich
b128f49fd5 Merge "bionic: Add securebits.h" 2013-02-14 22:49:28 +00:00
Elliott Hughes
fb7eb5e07f Simplify __stack_chk_fail, and fix it so we get debuggerd stack traces.
Bug: 2487269
Change-Id: Iec5e470fc22cd9108404f634a9d4baa2c7b7f58f
2013-02-14 14:37:34 -08:00
Nick Kralevich
0276656daa bionic: Add securebits.h
Change-Id: I2031796b9be117558b80246498b29736492cf269
2013-02-14 14:03:37 -08:00
Elliott Hughes
c2d26ce745 Merge "Turn on -Werror for ssp.cpp." 2013-02-14 19:17:33 +00:00
Elliott Hughes
dc5ec07158 Turn on -Werror for ssp.cpp.
libc_bionic.a is already compiled with -Werror, but this one file gets
compiled into its own library because it needs to be compiled with
-fno-stack-protector.

Change-Id: I273c535ab5c73ccaccbcf793fda1f788a2589abe
2013-02-14 11:15:58 -08:00
Nick Kralevich
fe33fc790a Merge "fix compiler warning." 2013-02-14 17:48:30 +00:00
Nick Kralevich
a261afb7c9 fix compiler warning.
bionic/libc/bionic/ssp.cpp:41:31: warning: converting to non-pointer type 'uintptr_t {aka unsigned int}' from NULL [-Wconversion-null]

Change-Id: Id154ed4a99520cca64ffd3dbe4d743db6e2da28a
2013-02-14 09:44:13 -08:00
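The warning comes from initializing an integer type with NULL, which is a pointer constant; assigning plain 0 gives the same value without the complaint. An illustrative before/after (not the actual ssp.cpp line):

    #include <stddef.h>
    #include <stdint.h>

    static uintptr_t before = (uintptr_t)NULL;  /* the uncast "= NULL" form is what warned */
    static uintptr_t after  = 0;                /* integer zero: same value, no warning */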
Elliott Hughes
6b97c7dc03 Merge "ffs was not being built for x86." 2013-02-14 00:49:47 +00:00
Elliott Hughes
97f2ec50a7 Merge "Stop using the local gcc." 2013-02-14 00:48:53 +00:00
Elliott Hughes
26a13bcfb8 Stop using the local gcc.
Some build servers are still out of date, so we're better off having
the known quantity of the consistently out-of-date prebuilt host gcc.

Change-Id: Ib6308ae926ffa1ac5d95efbbf32052344c17a6b8
2013-02-13 16:32:47 -08:00
Elliott Hughes
d2547040a1 ffs was not being built for x86.
Change-Id: I53e92273664a4d0a13536c2fa1aeb87e1f3cf4e8
2013-02-13 16:31:52 -08:00
Elliott Hughes
95b1ea1bb3 Merge "Add a bunch more missing ENDs to assembler routines." 2013-02-13 23:18:26 +00:00
Elliott Hughes
6719500dbd Add a bunch more missing ENDs to assembler routines.
This isn't everything; I've missed out those x86 files that are

Change-Id: Idb7bb1a68796d6c0b70ea2b5c3300e49da6c62d2
2013-02-13 15:12:32 -08:00
Elliott Hughes
2fee0340a9 Merge "Everyone has CLZ." 2013-02-13 23:10:19 +00:00
Elliott Hughes
73964c592c Everyone has CLZ.
Even armv5 had CLZ.

Change-Id: I51bc8d1166d09940fd0d3f4c7717edf26977082c
2013-02-13 14:40:48 -08:00
Elliott Hughes
627274292e Merge "Update getnameinfo.c, remove dead code, and fix error reporting." 2013-02-13 21:15:05 +00:00
Elliott Hughes
d8213bb573 Update getnameinfo.c, remove dead code, and fix error reporting.
Also add a unit test for the salen size checking.

Bug: 1889275
Change-Id: I8ec4107df9e2e9a8571e8915525249c6e44b98ad
2013-02-13 13:11:11 -08:00
Elliott Hughes
f659b3c90d Merge "Add a test that getaddrinfo works when hints are NULL." 2013-02-13 17:04:01 +00:00
Elliott Hughes
d3b9d11369 Add a test that getaddrinfo works when hints are NULL.
Bug: 1827911
Change-Id: I9e1b774c44c10a8c5391bcf3baf1607f50eaf214
2013-02-13 08:22:07 -08:00
Elliott Hughes
3002d64bcd Merge "Everyone has a TLS register." 2013-02-13 16:03:32 +00:00
Elliott Hughes
9cbe1e63dd Merge "Fix __pthread_clone and __bionic_clone error handling on x86." 2013-02-13 16:02:10 +00:00
Elliott Hughes
b6032515a0 Fix __pthread_clone and __bionic_clone error handling on x86.
Bug: 3461078
Change-Id: I93c151e27411211dd32717f206745c62c08c21ee
2013-02-12 23:02:33 -08:00
Elliott Hughes
91a9925998 Everyone has a TLS register.
Change-Id: Id7cdf67087aa7d5074c9c59b7e595bc391d9f146
2013-02-12 21:56:42 -08:00
Elliott Hughes
baf2c09f3f Merge "Put the right number of Ls after 64-bit constants." 2013-02-13 04:20:43 +00:00
Elliott Hughes
5227663d2f Put the right number of Ls after 64-bit constants.
Change-Id: I9f96259f21e42a84b9ebe20655fe0edb31f41892
2013-02-12 20:18:49 -08:00
Elliott Hughes
59aeff9417 Merge "Use ENTRY/END in custom x86 assembler too." 2013-02-13 04:01:01 +00:00
Elliott Hughes
07f7e62a25 Merge "Clean up pthread_create." 2013-02-13 01:56:46 +00:00
Elliott Hughes
899d65283d Merge "Give up trying to build the pthread_setname_np tests for glibc." 2013-02-13 01:56:07 +00:00
Elliott Hughes
9701d4b701 Give up trying to build the pthread_setname_np tests for glibc.
Looks like using /usr/bin/g++ isn't enough on some of our older
build servers.

Change-Id: Id7681fb164eb6324b10050f6bb237393e95b41e9
2013-02-12 17:55:22 -08:00
Elliott Hughes
4b4a882428 Clean up pthread_create.
Bug: 3461078
Change-Id: I082122a86d7692cd58f4145539241be026258ee0
2013-02-12 17:15:59 -08:00
Elliott Hughes
991ee7d895 Merge "Simplify pthread_create, using more public API." 2013-02-13 00:39:41 +00:00
Elliott Hughes
6d33918207 Simplify pthread_create, using more public API.
Change-Id: I08e65ba88ed01436223e4e528631c9e41ec0e7f4
2013-02-12 16:36:04 -08:00
Elliott Hughes
4912782c6a Merge "Really set errno if __pthread_clone fails." 2013-02-13 00:12:42 +00:00
Elliott Hughes
9f878c2fca Really set errno if __pthread_clone fails.
If r0 == 0, we're the child. If r0 > 0, we're the parent.
Otherwise set errno.

The __bionic_clone code I copy & pasted was wrong. This patch
fixes both.

Bug: 3461078
Change-Id: Ibb7d6cc7e54e666841f2f0dc59a141a0b31982e4
2013-02-12 16:07:06 -08:00
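A C rendering of the return-value contract described above (the real code is ARM assembly; the helper name is hypothetical):

    #include <errno.h>

    static int handle_clone_result(long r0) {
      if (r0 == 0) {
        return 0;          /* child: continue into the thread trampoline */
      }
      if (r0 > 0) {
        return (int)r0;    /* parent: r0 is the new thread's id */
      }
      errno = (int)-r0;    /* failure: the kernel returned -errno */
      return -1;
    }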
Elliott Hughes
558a8b1d3b Merge "Revert "Revert "More pthreads cleanup.""" 2013-02-13 00:05:24 +00:00
Elliott Hughes
3e898476c7 Revert "Revert "More pthreads cleanup.""
This reverts commit 6f94de3ca4

(Doesn't try to increase the number of TLS slots; that leads to
an inability to boot. Adds more tests.)

Change-Id: Ia7d25ba3995219ed6e686463dbba80c95cc831ca
2013-02-12 15:27:18 -08:00
Dima Zavin
3fa6746536 Merge "add factory property file definition" 2013-02-12 18:33:26 +00:00
Elliott Hughes
bfa199ab40 am fcaf4e9f: Merge "Revert "More pthreads cleanup.""
# Via Gerrit Code Review
* commit 'fcaf4e9f9b735e053469c7ecbf63584e10fd67a7':
  Revert "More pthreads cleanup."
2013-02-11 22:16:56 -08:00
Elliott Hughes
a2a9817f4c am 85f491f9: Merge "More pthreads cleanup."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '85f491f96da3b79d0d7cc5368bc1f649e1a82340':
  More pthreads cleanup.
2013-02-11 18:35:56 -08:00
Elliott Hughes
605cc29358 am 83bf28e6: Merge "Fix MIPS build."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '83bf28e6a38dbf28707147b50d29e81f4b555046':
  Fix MIPS build.
2013-02-11 18:19:34 -08:00
Elliott Hughes
8e2b28cb6c am c56be54a: Merge "Use ENTRY/END in ARM __get_sp."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'c56be54a18eff7e1c35c9a19cfc8b84a80780b73':
  Use ENTRY/END in ARM __get_sp.
2013-02-11 17:27:40 -08:00
Elliott Hughes
bdff26df27 Use ENTRY/END in custom x86 assembler too.
Change-Id: Ic2e482e5daff29c65d3b2ab0b2111c996bbc6226
2013-02-11 17:08:16 -08:00
Elliott Hughes
c440d24423 am cae7b2cf: Merge "Fix __pthread_clone on ARM to set errno on failure."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'cae7b2cfb509e7d5d10a8085b1ec319daaef768f':
  Fix __pthread_clone on ARM to set errno on failure.
2013-02-11 16:55:16 -08:00
Elliott Hughes
03798dd23c am 1fea0f25: Merge "Clean up ARM assembler files to use ENTRY/END."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '1fea0f258a45d918fe5ae8e9769f45c0348bd095':
  Clean up ARM assembler files to use ENTRY/END.
2013-02-11 16:17:48 -08:00
Elliott Hughes
6b73d13fa4 am 2d3e7233: Merge "Revert "Revert "Pull the pthread_key_t functions out of pthread.c."""
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '2d3e72336e76180fb00822386da4f14203d117ce':
  Revert "Revert "Pull the pthread_key_t functions out of pthread.c.""
2013-02-11 12:38:30 -08:00
Elliott Hughes
e4b08318c1 am 8397cdba: Merge "Revert "Pull the pthread_key_t functions out of pthread.c.""
# Via Gerrit Code Review
* commit '8397cdba9424febeaed4068829a5b0174ee1138c':
  Revert "Pull the pthread_key_t functions out of pthread.c."
2013-02-11 12:21:50 -08:00
Elliott Hughes
024246ec27 am 09e89c3c: Merge "Pull the pthread_key_t functions out of pthread.c."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '09e89c3ced51d846e13c2508fbb6812bb61475cd':
  Pull the pthread_key_t functions out of pthread.c.
2013-02-11 12:05:45 -08:00
Elliott Hughes
8f509e8be1 am 9a9bb243: Merge "Switch to using AT_RANDOM for the stack guards."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '9a9bb243b50be5e3910b8edad72327bc216e72d0':
  Switch to using AT_RANDOM for the stack guards.
2013-02-08 11:22:05 -08:00
Elliott Hughes
636d21f32b am 03579da2: Merge "Add a few more missing libm long double stubs."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '03579da2d91a57a0f22da14e4216ecaf811869f4':
  Add a few more missing libm long double stubs.
2013-02-07 18:15:06 -08:00
Nick Kralevich
f156b901b1 am fa75fce5: Merge "update xattr.h"
# Via Gerrit Code Review (1) and Nick Kralevich (1)
* commit 'fa75fce56641255a571b8b472f010863c3095b70':
  update xattr.h
2013-02-07 16:50:08 -08:00
Elliott Hughes
86e4e23408 am 964886af: Merge "Remove dead code from gensyscalls.py."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '964886afa339959aedad1c09af738df4ffa4091d':
  Remove dead code from gensyscalls.py.
2013-02-07 14:57:41 -08:00
Elliott Hughes
5bb67760f0 am f6afd3b6: Merge "Fix x86 build, remove void* arithmetic."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'f6afd3b670e23f56bf341d12136416aee17ea249':
  Fix x86 build, remove void* arithmetic.
2013-02-07 12:34:24 -08:00
Elliott Hughes
8ddef40dad am 59e9a496: Merge "__progname should be const char*, not char*."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '59e9a496b4341fd2b11d4a01544bf7edf3d00cc5':
  __progname should be const char*, not char*.
2013-02-07 12:34:23 -08:00
Elliott Hughes
6f67cd224e am 2f41531f: Merge "Clean up the argc/argv/envp/auxv handling."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '2f41531ff9f48dbdaf2ba711e14c669031728e99':
  Clean up the argc/argv/envp/auxv handling.
2013-02-07 11:56:57 -08:00
Elliott Hughes
4e9d9e4df8 am d4187efd: Merge "Switch x86 syscall stubs over to the ENTER/END style of the ARM stubs."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'd4187efd7f9c30ffaff4738862e3d08be27a52e9':
  Switch x86 syscall stubs over to the ENTER/END style of the ARM stubs.
2013-02-07 09:12:18 -08:00
Elliott Hughes
09559dd046 am d32fdbaf: Merge "Add a missing logbl stub."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'd32fdbaf03f688497adbec885e85c0a69f7a4542':
  Add a missing logbl stub.
2013-02-06 16:30:26 -08:00
Elliott Hughes
d2a6d8d77d am c9ab32e4: Merge "Improve benchmarking tool, add a few math benchmarks."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'c9ab32e4e94f770de347d6da8e7615fa131bcfbb':
  Improve benchmarking tool, add a few math benchmarks.
2013-02-06 16:06:24 -08:00
Elliott Hughes
5fe286618f am d10a5a02: Merge "Remove the currently-unused i387 assembler to make way for the new."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'd10a5a02d1e9315dd7d780c2f221d116ced45a69':
  Remove the currently-unused i387 assembler to make way for the new.
2013-02-06 12:22:30 -08:00
Elliott Hughes
0e51a86124 am 4fc2a74b: Merge "Remove partial implementation of MIPS non-PIC support."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '4fc2a74b29a12ed7fb4edc6b76db96b9d7dfa3f2':
  Remove partial implementation of MIPS non-PIC support.
2013-02-06 11:51:12 -08:00
Elliott Hughes
df8c72e510 am d7ff139f: Merge "Remove bogus extra alignment from sbrk."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'd7ff139fec5cec08793523aa97503ade2b13c38e':
  Remove bogus extra alignment from sbrk.
2013-02-05 17:09:55 -08:00
Elliott Hughes
3cbcb87bd1 am a4f88fdc: Merge "Document the mallinfo struct, add missing attributes."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a4f88fdcf0e9be084d04048ad65671716298c3c2':
  Document the mallinfo struct, add missing attributes.
2013-02-04 13:59:15 -08:00
Elliott Hughes
384bdb3776 am a3120aaf: Merge "Add basic tests for posix_memalign."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a3120aaf2f4e20261a2ea9fd8862e2b360183fc5':
  Add basic tests for posix_memalign.
2013-02-04 13:25:55 -08:00
Elliott Hughes
ada6de673b am a1821f01: Merge "Regenerate msm_ion.h."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a1821f015306e221e6a51e5acc27176ae2d72f6b':
  Regenerate msm_ion.h.
2013-02-04 10:52:16 -08:00
Elliott Hughes
16444e0f85 am 3dc6b57c: Merge "Don\'t claim there were no leaks if we weren\'t even checking."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '3dc6b57cf54b15a756551deeda33df5715e519bb':
  Don't claim there were no leaks if we weren't even checking.
2013-02-01 17:26:56 -08:00
Elliott Hughes
33e4365d87 am 1bf83555: Merge "Make sincosl call sinl and cosl."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '1bf835558a3f7424c805766250ace88fa70e0630':
  Make sincosl call sinl and cosl.
2013-02-01 16:50:49 -08:00
Elliott Hughes
422c400dd3 am ae70b946: Merge "Update the libm/NOTICE file after the upgrade."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'ae70b9467419c5a9b21e06dc1dba201a112a83be':
  Update the libm/NOTICE file after the upgrade.
2013-02-01 16:35:08 -08:00
Elliott Hughes
704d9c5b45 am a9dd3670: Merge "Restore bionic\'s <linux/elf-em.h>."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a9dd36702f4a9d65d084387050b688b8965b13b3':
  Restore bionic's <linux/elf-em.h>.
2013-02-01 16:19:37 -08:00
Elliott Hughes
ff26e25b51 am 44badc70: Merge "Upgrade libm."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '44badc70ccd35d7690bda9f107e3e5da0b80d295':
  Upgrade libm.
2013-02-01 15:24:27 -08:00
Elliott Hughes
9743d7fb60 am fb55511e: am e1a124e5: Merge "Say explicitly if there were no leaks."
# Via Android Git Automerger (1) and others
* commit 'fb55511e71900476fd03f9c490dc60269d076d1f':
  Say explicitly if there were no leaks.
2013-01-30 12:11:30 -08:00
Elliott Hughes
fb55511e71 am e1a124e5: Merge "Say explicitly if there were no leaks."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'e1a124e5c93b59e2b6792e4de8a2c72dc0a78be1':
  Say explicitly if there were no leaks.
2013-01-30 12:09:14 -08:00
Elliott Hughes
9d43c07987 am 4e882503: am a990cf5b: Merge "Clean up trailing whitespace in the kernel headers."
# Via Android Git Automerger (1) and others
* commit '4e8825038e08762dcc973fa435b531f10290ffa8':
  Clean up trailing whitespace in the kernel headers.
2013-01-30 10:31:24 -08:00
Elliott Hughes
4e8825038e am a990cf5b: Merge "Clean up trailing whitespace in the kernel headers."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a990cf5b3392c5aef767aee1e67b4d7ef651afc6':
  Clean up trailing whitespace in the kernel headers.
2013-01-30 10:28:59 -08:00
Elliott Hughes
5821e11d0b am 7bc49fcf: am 323287ea: Merge "Fix valgrind build."
# Via Android Git Automerger (1) and others
* commit '7bc49fcfc0de8ce6f386f362cdab6902e6bbe7b4':
  Fix valgrind build.
2013-01-29 18:09:26 -08:00
Elliott Hughes
7bc49fcfc0 am 323287ea: Merge "Fix valgrind build."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '323287ea7fb1d22f64b49d701b33cef9fbaf757d':
  Fix valgrind build.
2013-01-29 18:06:19 -08:00
Elliott Hughes
9d995d6721 am 1cc09402: am f6721978: Merge "Fix x86 build to use <elf.h>."
# Via Android Git Automerger (1) and others
* commit '1cc09402e86b91213e06a9a349c5c510cefcf1d5':
  Fix x86 build to use <elf.h>.
2013-01-29 17:04:14 -08:00
Elliott Hughes
0a383883fa am be11de2b: am 5fb409b7: Merge "Bring the NOTICE files back up to date."
# Via Android Git Automerger (1) and others
* commit 'be11de2b52ac5f8cbb5a733821cb27002f2ea975':
  Bring the NOTICE files back up to date.
2013-01-29 17:04:13 -08:00
Elliott Hughes
a7f44b5afe am 172e038f: am f09f6db5: Merge "Use the NetBSD <sys/exec_elf.h>."
# Via Android Git Automerger (1) and others
* commit '172e038f9e5b711a77afe7e8899343215cf25c9e':
  Use the NetBSD <sys/exec_elf.h>.
2013-01-29 17:04:11 -08:00
Elliott Hughes
1cc09402e8 am f6721978: Merge "Fix x86 build to use <elf.h>."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'f67219783fa7c38c1f4f077364290d10d3aa1db4':
  Fix x86 build to use <elf.h>.
2013-01-29 16:58:45 -08:00
Elliott Hughes
be11de2b52 am 5fb409b7: Merge "Bring the NOTICE files back up to date."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '5fb409b7b0b0a4ecca5fd2a6c7dc5ce421f6b7cf':
  Bring the NOTICE files back up to date.
2013-01-29 16:42:04 -08:00
Elliott Hughes
172e038f9e am f09f6db5: Merge "Use the NetBSD <sys/exec_elf.h>."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'f09f6db5dd87856bbbb7a3d465187b9d8799a788':
  Use the NetBSD <sys/exec_elf.h>.
2013-01-29 16:26:10 -08:00
Elliott Hughes
f5d6238c4b am 3f20ecc2: am 3db2fc5a: Merge "Don\'t collect useless stack frames; do demangle C++ symbols."
# Via Android Git Automerger (1) and others
* commit '3f20ecc20486ae8fe8d9332102b503135f83c62d':
  Don't collect useless stack frames; do demangle C++ symbols.
2013-01-29 12:07:13 -08:00
Elliott Hughes
3f20ecc204 am 3db2fc5a: Merge "Don\'t collect useless stack frames; do demangle C++ symbols."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '3db2fc5acb4894a2cb22533b165a0de1bbafc3f6':
  Don't collect useless stack frames; do demangle C++ symbols.
2013-01-29 12:03:53 -08:00
Elliott Hughes
2218b89ca7 am d383ac15: am a0151cbf: Merge "Unit tests for formatting code, fix %%."
# Via Android Git Automerger (1) and others
* commit 'd383ac1570b553d68ed399c74b73bad1498d78f6':
  Unit tests for formatting code, fix %%.
2013-01-28 14:11:54 -08:00
Elliott Hughes
d383ac1570 am a0151cbf: Merge "Unit tests for formatting code, fix %%."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit 'a0151cbfbaa37965dbcd188a55a78d3ad4802b9b':
  Unit tests for formatting code, fix %%.
2013-01-28 14:10:08 -08:00
Elliott Hughes
d265d36749 am f07db754: am 0a91b11d: Merge "More debug malloc fixes."
# Via Android Git Automerger (1) and others
* commit 'f07db75443cce4ab96c93bdaa0793d7b8e87547e':
  More debug malloc fixes.
2013-01-25 18:09:45 -08:00
Elliott Hughes
f07db75443 am 0a91b11d: Merge "More debug malloc fixes."
# Via Elliott Hughes (1) and Gerrit Code Review (1)
* commit '0a91b11d286446fe7849a6f537b4a21a52f63399':
  More debug malloc fixes.
2013-01-25 18:07:23 -08:00
Elliott Hughes
88af723267 am 7ae99845: am 6a94cb74: Merge "arm syscall : for eabi call_default don\'t use stack"
# By Matthieu Castet
# Via Android Git Automerger (1) and others
* commit '7ae998456c2564aff4ecbe6a6df214821f6e26cc':
  arm syscall : for eabi call_default don't use stack
2013-01-25 14:10:02 -08:00
Nick Kralevich
2ebcd19cbc am 27818d2a: am b871e5d6: Merge "system_properties: do more checking of file"
# Via Android Git Automerger (1) and others
* commit '27818d2a16e4972acba15368f55a59cc0ed990f0':
  system_properties: do more checking of file
2013-01-25 14:10:01 -08:00
Elliott Hughes
7ae998456c am 6a94cb74: Merge "arm syscall : for eabi call_default don\'t use stack"
# By Matthieu Castet
# Via Gerrit Code Review (1) and Matthieu Castet (1)
* commit '6a94cb748bf63278c3271b0ab610061b0dc6f04a':
  arm syscall : for eabi call_default don't use stack
2013-01-25 14:08:26 -08:00
Nick Kralevich
27818d2a16 am b871e5d6: Merge "system_properties: do more checking of file"
# Via Gerrit Code Review (1) and Nick Kralevich (1)
* commit 'b871e5d6b3b4a214c7f19bdfca7663f1fe49fda8':
  system_properties: do more checking of file
2013-01-25 14:08:26 -08:00
Nick Kralevich
151e91e66c am da2d2c61: am 82ef8296: Merge "prctl.h: include sys/cdefs.h"
# Via Android Git Automerger (1) and others
* commit 'da2d2c618c23f212f8dec0c0615bf75d7c18b88d':
  prctl.h: include sys/cdefs.h
2013-01-24 16:11:56 -08:00
Nick Kralevich
da2d2c618c am 82ef8296: Merge "prctl.h: include sys/cdefs.h"
# Via Gerrit Code Review (1) and Nick Kralevich (1)
* commit '82ef8296dc5e25b9cc8e7231f9515f50185dac9d':
  prctl.h: include sys/cdefs.h
2013-01-24 08:59:00 -08:00
Nick Kralevich
b8726037ee am e7d937b5: am b3351f12: Merge "libc: use more secure system properties if available"
* commit 'e7d937b52f183ce84751701c369ffe6a4c81d033':
  libc: use more secure system properties if available
2013-01-23 11:09:40 -08:00
Nick Kralevich
e7d937b52f am b3351f12: Merge "libc: use more secure system properties if available"
* commit 'b3351f12047747b603efb070069e7afdf3040335':
  libc: use more secure system properties if available
2013-01-23 11:07:23 -08:00
Elliott Hughes
c08ab018ad am 40107623: am 778a68e1: Merge "Don\'t free anything when reporting leaks."
* commit '40107623b05fdc2b6c61d9c885483abd3add486a':
  Don't free anything when reporting leaks.
2013-01-22 22:14:32 -08:00
Elliott Hughes
ce8732b188 am 552e02fa: am 5c8f75ef: Merge "Disable leak checking for mksh; it\'s way too leaky."
* commit '552e02fa9c99fd234c57d756358e3208d11a13ad':
  Disable leak checking for mksh; it's way too leaky.
2013-01-22 22:14:31 -08:00
Elliott Hughes
40107623b0 am 778a68e1: Merge "Don\'t free anything when reporting leaks."
* commit '778a68e1e57408be04806b5bfc3150aade44bcbf':
  Don't free anything when reporting leaks.
2013-01-22 22:11:55 -08:00
Elliott Hughes
552e02fa9c am 5c8f75ef: Merge "Disable leak checking for mksh; it\'s way too leaky."
* commit '5c8f75ef8bd89498de1d1108efa54869a2784738':
  Disable leak checking for mksh; it's way too leaky.
2013-01-22 22:11:54 -08:00
Elliott Hughes
a8e0f2b956 am b16ec162: am 28f82b26: Merge "Add const for first argument of sigismember for fit POSIX spec"
* commit 'b16ec162881110a30f665ce7bd1432ccefba60b7':
  Add const for first argument of sigismember for fit POSIX spec
2013-01-22 17:29:01 -08:00
Elliott Hughes
b16ec16288 am 28f82b26: Merge "Add const for first argument of sigismember for fit POSIX spec"
* commit '28f82b260c9076aae437dafb57193a174aef1eb3':
  Add const for first argument of sigismember for fit POSIX spec
2013-01-22 17:26:45 -08:00
Elliott Hughes
5f79f75ba6 am 57edf36c: am 8d6302cd: Merge "Our strcoll(3) is no different from NetBSD\'s, so take exactly theirs."
* commit '57edf36c5f83fa7bcf9d8d028cde0acc7ddfadcc':
  Our strcoll(3) is no different from NetBSD's, so take exactly theirs.
2013-01-22 15:33:24 -08:00
Elliott Hughes
57edf36c5f am 8d6302cd: Merge "Our strcoll(3) is no different from NetBSD\'s, so take exactly theirs."
* commit '8d6302cdcff9c53fe3ecd36ab479fdf08318c504':
  Our strcoll(3) is no different from NetBSD's, so take exactly theirs.
2013-01-22 15:31:26 -08:00
Elliott Hughes
ad551eaa04 am 5f7b6b83: am 547eba0a: Merge "Avoid overflow in memccpy."
* commit '5f7b6b8301658a834516f05e8e0a9eb4513e6e62':
  Avoid overflow in memccpy.
2013-01-22 15:08:58 -08:00
Elliott Hughes
5f7b6b8301 am 547eba0a: Merge "Avoid overflow in memccpy."
* commit '547eba0a63951d7db93c0542e1ecab891725b9a8':
  Avoid overflow in memccpy.
2013-01-22 15:05:31 -08:00
Elliott Hughes
1029364ec5 am e8e1f34a: am 85597e2b: Merge "Clean up debuggerd-related logging."
* commit 'e8e1f34a33158290aa3bb9059e31ddc7a73ac1e6':
  Clean up debuggerd-related logging.
2013-01-22 14:53:25 -08:00
Elliott Hughes
e8e1f34a33 am 85597e2b: Merge "Clean up debuggerd-related logging."
* commit '85597e2b59af91be0607ed073b6574b934ed369d':
  Clean up debuggerd-related logging.
2013-01-22 14:49:11 -08:00
Elliott Hughes
0dfb2ecaab am 0609c0fe: am 7af7895e: Merge "Use the new non-allocating logging for dlmalloc failures."
* commit '0609c0fe8dae2ec10f1811c46a4d583f557c68f7':
  Use the new non-allocating logging for dlmalloc failures.
2013-01-22 14:35:06 -08:00
Elliott Hughes
0609c0fe8d am 7af7895e: Merge "Use the new non-allocating logging for dlmalloc failures."
* commit '7af7895eeb810ff1a1ca8b60fcda13595d551114':
  Use the new non-allocating logging for dlmalloc failures.
2013-01-22 14:32:02 -08:00
Nick Kralevich
98ae1a85b0 am e652ed30: am 244bee5b: Merge "bionic_auxv.h: fix #define conflict"
* commit 'e652ed30514afcf314b40c69b9cac088602a83da':
  bionic_auxv.h: fix #define conflict
2013-01-22 13:43:46 -08:00
Nick Kralevich
e652ed3051 am 244bee5b: Merge "bionic_auxv.h: fix #define conflict"
* commit '244bee5bb6e0bc12b739c57028ac8af23a18aed0':
  bionic_auxv.h: fix #define conflict
2013-01-22 13:38:53 -08:00
Elliott Hughes
04afcd25a3 am 8ef83bc4: am 02f96b9d: Merge "Add missing extern "C"."
* commit '8ef83bc46e06e8fe98e918fcb1c582d6f5807461':
  Add missing extern "C".
2013-01-22 11:34:25 -08:00
Elliott Hughes
8ef83bc46e am 02f96b9d: Merge "Add missing extern "C"."
* commit '02f96b9db0242711fe1dfe0713c0c4e698561da5':
  Add missing extern "C".
2013-01-22 11:32:16 -08:00
Elliott Hughes
4cd5703b9e am 75b64a1b: am ca483765: Merge "Fix the duplication in the debugging code."
* commit '75b64a1b64e788b9e69ac4f4cd8cce37932513a8':
  Fix the duplication in the debugging code.
2013-01-22 10:02:12 -08:00
Elliott Hughes
75b64a1b64 am ca483765: Merge "Fix the duplication in the debugging code."
* commit 'ca483765bd0dc16294b9e67dd0de5c6d53b1bfa3':
  Fix the duplication in the debugging code.
2013-01-22 09:59:44 -08:00
Nick Kralevich
9468ee53ac am f246ae98: am 5496bbf6: Merge "Revert "Filter ANDROID_PROPERTY_WORKSPACE""
* commit 'f246ae984baa133b93af4e14f94ba35990b43dbb':
  Revert "Filter ANDROID_PROPERTY_WORKSPACE"
2013-01-18 13:11:13 -08:00
Nick Kralevich
f246ae984b am 5496bbf6: Merge "Revert "Filter ANDROID_PROPERTY_WORKSPACE""
* commit '5496bbf6a3592fd99cee6b8c20c8624c2aeea0c1':
  Revert "Filter ANDROID_PROPERTY_WORKSPACE"
2013-01-18 13:10:04 -08:00
Nick Kralevich
8e833972c2 am 0a0c2321: am 4bfaf1e5: Merge "FORTIFY_SOURCE: optimize"
* commit '0a0c23216766adf76739dc38dcb45934105cd41c':
  FORTIFY_SOURCE: optimize
2013-01-17 17:06:11 -08:00
Nick Kralevich
0a0c232167 am 4bfaf1e5: Merge "FORTIFY_SOURCE: optimize"
* commit '4bfaf1e5f62748b305406ff4ceebd5f4b750038c':
  FORTIFY_SOURCE: optimize
2013-01-17 17:04:33 -08:00
Elliott Hughes
a4723742c1 am 261e9d08: am e4ca88d9: Merge "Add functionlity to the scripts to replace tokens in kernel headers based on architecture."
* commit '261e9d08dbf1cd7fea7e1799338238d11d18cb7c':
  Add functionlity to the scripts to replace tokens in kernel headers based on architecture.
2013-01-17 16:51:09 -08:00
Elliott Hughes
261e9d08db am e4ca88d9: Merge "Add functionlity to the scripts to replace tokens in kernel headers based on architecture."
* commit 'e4ca88d9fa8757e4fb4056fcafa5bc15b406a2fd':
  Add functionlity to the scripts to replace tokens in kernel headers based on architecture.
2013-01-17 16:48:45 -08:00
Nick Kralevich
36c4eb188c am 3a72fe58: am f3fe1945: Merge "linker: add -Wl,--exclude-libs,ALL to LDFLAGS"
* commit '3a72fe587f454a2eea79b5564e4ab1d3880b51c8':
  linker: add -Wl,--exclude-libs,ALL to LDFLAGS
2013-01-17 09:01:14 -08:00
Nick Kralevich
3a72fe587f am f3fe1945: Merge "linker: add -Wl,--exclude-libs,ALL to LDFLAGS"
* commit 'f3fe19459fd9263e8cc8a413a5313b1ec3cf3975':
  linker: add -Wl,--exclude-libs,ALL to LDFLAGS
2013-01-17 08:58:27 -08:00
Nick Kralevich
91bc5865a3 am 8d01c055: am 1271cdc1: Merge "Revert "stack protector: use AT_RANDOM""
* commit '8d01c0557bb2b7ea30f4038b6c84b816800073a7':
  Revert "stack protector: use AT_RANDOM"
2013-01-16 13:55:55 -08:00
Nick Kralevich
8d01c0557b am 1271cdc1: Merge "Revert "stack protector: use AT_RANDOM""
* commit '1271cdc1c91c6ae688917bc8f4ae59d2a97b3e99':
  Revert "stack protector: use AT_RANDOM"
2013-01-16 13:53:25 -08:00
Nick Kralevich
27ff1ae414 am de666485: am ba117e41: Merge "stack protector: use AT_RANDOM"
* commit 'de666485b8123ac35be94109336f7c56a7e9e3c2':
  stack protector: use AT_RANDOM
2013-01-16 13:31:24 -08:00
Nick Kralevich
079e435655 am 30894bdf: am 1b34228b: Merge "Filter ANDROID_PROPERTY_WORKSPACE"
* commit '30894bdfd6e4c74ab673d47391e62fb14fb51381':
  Filter ANDROID_PROPERTY_WORKSPACE
2013-01-16 13:31:23 -08:00
Nick Kralevich
de666485b8 am ba117e41: Merge "stack protector: use AT_RANDOM"
* commit 'ba117e4172fe6f160bf5f4d58b37e12c08c34245':
  stack protector: use AT_RANDOM
2013-01-16 11:31:00 -08:00
Nick Kralevich
30894bdfd6 am 1b34228b: Merge "Filter ANDROID_PROPERTY_WORKSPACE"
* commit '1b34228bb289723c4ba0534eae57d0d085a3d0fa':
  Filter ANDROID_PROPERTY_WORKSPACE
2013-01-16 11:14:01 -08:00
Elliott Hughes
b989c9ceda Revert "DO NOT MERGE Revert "Add the libcutils localtime_tz and mktime_t extensions to bionic.""
This reverts commit f4b34b6c39.
2013-01-16 10:34:33 -08:00
Elliott Hughes
e05709b1df am e611fad0: am 14e1975e: Merge "Fix signalfd for MIPS."
* commit 'e611fad0d055f2d869981136e5e51b7a01d525fc':
  Fix signalfd for MIPS.
2013-01-16 09:48:04 -08:00
Elliott Hughes
e611fad0d0 am 14e1975e: Merge "Fix signalfd for MIPS."
* commit '14e1975e13c197180ed0481f305f83a362b16a24':
  Fix signalfd for MIPS.
2013-01-16 09:45:57 -08:00
Elliott Hughes
04aa0fdda6 am 42c5847d: (-s ours) am 791e26d9: (-s ours) Merge "Revert "DO NOT MERGE Revert "Add the libcutils localtime_tz and mktime_t extensions to bionic."""
* commit '42c5847d65d3e50a3efc4e32bb913dc66c9d0e4d':
  Revert "DO NOT MERGE Revert "Add the libcutils localtime_tz and mktime_t extensions to bionic.""
2013-01-15 13:37:56 -08:00
Elliott Hughes
42c5847d65 am 791e26d9: (-s ours) Merge "Revert "DO NOT MERGE Revert "Add the libcutils localtime_tz and mktime_t extensions to bionic."""
* commit '791e26d9598a72376b8a16a5ccfb5d1ae0010965':
  Revert "DO NOT MERGE Revert "Add the libcutils localtime_tz and mktime_t extensions to bionic.""
2013-01-15 13:35:58 -08:00
Nick Kralevich
d29c9fc188 am b59e358b: Merge "fix strerror_r test"
* commit 'b59e358bb902124cc7d648266a97f96beefc8142':
  fix strerror_r test
2013-01-15 11:15:21 -08:00
Ben Cheng
4130af46bf am b09d7d86: Merge "Add __aeabi_idiv to the dummy reference list."
* commit 'b09d7d86004ab75b774358454d8ee261987af96b':
  Add __aeabi_idiv to the dummy reference list.
2013-01-14 15:44:01 -08:00
Nick Kralevich
1519690cfb am 29fe857e: Merge "headers: update auxvec.h from Linux kernel"
* commit '29fe857ec80e59347e28458a5396eb68d1cba0e4':
  headers: update auxvec.h from Linux kernel
2013-01-14 14:31:28 -08:00
Ian Rogers
763d4bb345 am 68fa57f0: Merge "Name anonymous mmap mallocs."
* commit '68fa57f000285af20100c00db3d2bc143ad32294':
  Name anonymous mmap mallocs.
2013-01-14 11:19:00 -08:00
Nick Kralevich
13bd37160e am bb897fa9: Merge "libc_init_static: apply relro earlier."
* commit 'bb897fa9f79d25e4445fe3ab46b86657d6660c10':
  libc_init_static: apply relro earlier.
2013-01-14 10:25:16 -08:00
Elliott Hughes
375db86aea am 09d13c39: Merge "Fix my git mistake."
* commit '09d13c393e7b6a77cc33e5ef87e5c92ccd13fe63':
  Fix my git mistake.
2013-01-14 10:08:56 -08:00
Elliott Hughes
bc3c718346 am 48c632a3: Merge "[MIPS] Set DT_DEBUG dyntab entry if it is writable"
* commit '48c632a381b10996ec72a53cc95b009b06785d09':
  [MIPS] Set DT_DEBUG dyntab entry if it is writable
2013-01-14 09:46:19 -08:00
Nick Kralevich
fdd6dfa863 am 2c5153b0: libc: add getauxval()
* commit '2c5153b043b44e9935a334ae9b2d5a4bc5258b40':
  libc: add getauxval()
2013-01-11 16:59:57 -08:00
Andrew Boie
07564f2d3a add factory property file definition
This property file is used for properties which are set at device
provisioning time or in the factory. They are never touched by
a software update or factory data reset and typically contain
data specific to the particular unit.

Change-Id: I2e7c2fe62cb684cb2449eea917c42b19462e89a5
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2013-01-11 13:02:27 -08:00
Nick Kralevich
d0f81466e4 am 69c89942: Merge "Add stack canaries / strcpy tests."
* commit '69c89942db2ed472c71aa84903d0259cc84aa074':
  Add stack canaries / strcpy tests.
2013-01-11 11:26:54 -08:00
Wink Saville
801aeefe2d am a12c5445: Fix unused warnings in pthread.c
* commit 'a12c54454f3a6132988b68873903f6e9eed7f384':
  Fix unused warnings in pthread.c
2013-01-11 10:12:12 -08:00
Elliott Hughes
0f9be1eaee am bfde0b6f: Merge "glibc 2.15 treats errno as signed in strerror(3)."
* commit 'bfde0b6fd9e5de545746ab963d3a05ed2a8014f6':
  glibc 2.15 treats errno as signed in strerror(3).
2013-01-10 16:27:26 -08:00
Elliott Hughes
34c7a3c2b0 am 0d3700d9: Merge "Only have one copy of the kernel_sigset_t hack, and add more tests."
* commit '0d3700d957debe841c385f66a8026ca8b3755815':
  Only have one copy of the kernel_sigset_t hack, and add more tests.
2013-01-10 15:28:46 -08:00
Elliott Hughes
4ff6fa97e7 am 2bbb8fac: Merge "Add signalfd call to bionic"
* commit '2bbb8fac61e482dd96386620cc6f7f193e9c6840':
  Add signalfd call to bionic
2013-01-10 13:30:16 -08:00
Elliott Hughes
5c5fe07e43 am 364d9ee6: Merge "Don\'t test GNU-style ELF hashes on MIPS."
* commit '364d9ee62f583bca795a2b40f71a04e272c80389':
  Don't test GNU-style ELF hashes on MIPS.
2013-01-07 15:00:28 -08:00
Elliott Hughes
79cecbd421 am 7e22db03: Merge "Fix an off-by-one error in the sigset_t function error handling."
* commit '7e22db037e6d9ab117bf5d50c7aca85fe74941a0':
  Fix an off-by-one error in the sigset_t function error handling.
2013-01-07 14:21:26 -08:00
Elliott Hughes
2458d06fc1 am 26c5b2d4: Merge "[MIPS] Rewrite fenv.h for Android"
* commit '26c5b2d460e3b2595eb7f0605edcd02753a13594':
  [MIPS] Rewrite fenv.h for Android
2013-01-07 13:48:55 -08:00
Elliott Hughes
ec1370191e am 63dc5927: Merge "Add AF_CAN and PF_CAN (and other missing families)."
* commit '63dc592789e386ba2cd4e748090ba71d449a1e7c':
  Add AF_CAN and PF_CAN (and other missing families).
2013-01-03 16:59:39 -08:00
Elliott Hughes
81508de3d2 am f0036944: Merge "Fix debug malloc."
* commit 'f0036944a13a76dddda70347032128d4a27081d6':
  Fix debug malloc.
2013-01-03 16:42:42 -08:00
Elliott Hughes
c654f4fda8 am b1b53177: Merge "sysconf.c was renamed to sysconf.cpp (and modified)..."
* commit 'b1b5317799a34ac9e93f568af82952e52a9e9a13':
  sysconf.c was renamed to sysconf.cpp (and modified)...
2013-01-03 12:04:48 -08:00
Elliott Hughes
1287d106ed am fb62558b: Merge "Define _POSIX_MONOTONIC_CLOCK and implement sysconf(_SC_MONOTONIC_CLOCK)."
* commit 'fb62558bb4c25c8f71e0b949ba8552c674578674':
  Define _POSIX_MONOTONIC_CLOCK and implement sysconf(_SC_MONOTONIC_CLOCK).
2013-01-02 15:55:42 -08:00
Elliott Hughes
c0c05ff8ec am 918d776f: Merge "Support System.loadLibrary for libraries with transitive dependencies."
* commit '918d776f7edf1a9c4209ada4c2401ef0ea0bf660':
  Support System.loadLibrary for libraries with transitive dependencies.
2012-12-21 17:54:07 -08:00
Elliott Hughes
df5c77c91a am f6524f8b: Merge "Fix format_number."
* commit 'f6524f8bda87f7d25dad7bed7091a93f3de58728':
  Fix format_number.
2012-12-21 10:04:27 -08:00
Elliott Hughes
5fd31f6891 am 4b582142: Merge "Fix x86 dynamic linker build."
* commit '4b58214205d3d29dbdfed49964010235ef3f0403':
  Fix x86 dynamic linker build.
2012-12-19 09:46:32 -08:00
Elliott Hughes
a0edecf343 am a4ebdcf5: Merge "Check for unknown flags passed to dlopen(3)."
* commit 'a4ebdcf5bd20072f9a0e48c22fff401acda43e1e':
  Check for unknown flags passed to dlopen(3).
2012-12-18 19:41:24 -08:00
Elliott Hughes
607bd47707 am 4c4b08a3: Merge "Fix <endian.h> and <sys/endian.h>."
* commit '4c4b08a32eace878e4780ee340a57c43be950159':
  Fix <endian.h> and <sys/endian.h>.
2012-12-11 17:27:41 -08:00
Elliott Hughes
f822e7f68e am 0521ff82: Merge "Use pthread_kill() in raise()"
* commit '0521ff8234cafcc61060961b2f856fcf4dae1e7d':
  Use pthread_kill() in raise()
2012-12-10 11:40:04 -08:00
Geremy Condra
28996f8006 am 6fcf1770: Merge "Added audit.h from the kernel."
* commit '6fcf1770e5829b8439b7e7ed7efb39ee5d425e23':
  Added audit.h from the kernel.
2012-12-07 14:32:21 -08:00
Nick Kralevich
080210c16a am c8354f50: Merge "FORTIFY_SOURCE: remove memcpy overlap checks"
* commit 'c8354f501ef9a6d3430de27386bf0b42c30981d9':
  FORTIFY_SOURCE: remove memcpy overlap checks
2012-12-07 12:19:08 -08:00
Nick Kralevich
79e1a01e9e am 5dfdb701: Merge "FORTIFY_SOURCE: fix up previous commit"
* commit '5dfdb701292c53e74e319f277d808e598389c8e7':
  FORTIFY_SOURCE: fix up previous commit
2012-12-07 12:18:58 -08:00
Nick Kralevich
8717c3db2f am 7a34ed2b: Merge "clean up FORTIFY_SOURCE handling."
* commit '7a34ed2bb36fcbe6967d8b670f4d70ada1dcef49':
  clean up FORTIFY_SOURCE handling.
2012-12-04 15:52:53 -08:00
Nick Kralevich
ded6e3bfca am db79e827: Merge "FORTIFY_SOURCE: fortify strrchr"
* commit 'db79e827ebcb077ebaa6d6c96915c5bffba9a0ca':
  FORTIFY_SOURCE: fortify strrchr
2012-12-04 11:43:30 -08:00
Elliott Hughes
3960fb602c am 60fb6833: Merge "mmap: Remove madvise() workaround"
* commit '60fb68338b7541b6022fc343857b90c088c399cd':
  mmap: Remove madvise() workaround
2012-12-03 09:36:47 -08:00
Elliott Hughes
43701d68a9 am 46d64ed2: Merge "Add argument checking to sigemptyset(3) and friends."
* commit '46d64ed2e5cf8e37d53b116f2281b4a39cc4f288':
  Add argument checking to sigemptyset(3) and friends.
2012-12-03 07:53:28 -08:00
Nick Kralevich
4140d90c60 am 3acc908c: Merge "FORTIFY_SOURCE: fortify strchr"
* commit '3acc908c79aa9cba975861fa23fa2672733b5d5d':
  FORTIFY_SOURCE: fortify strchr
2012-11-30 17:13:20 -08:00
Elliott Hughes
7beaa5024f am 16c61f08: Merge "Reduce the exposure of the __set_errno implementation detail."
* commit '16c61f088524756ef0fa1b030719f6745eaef2db':
  Reduce the exposure of the __set_errno implementation detail.
2012-11-30 14:45:23 -08:00
Elliott Hughes
4835e6ef51 am 97b70b2b: Merge "Replace .S version of x86 crtfiles with .c version"
* commit '97b70b2bda47af46adf58dfde61050357114aa1f':
  Replace .S version of x86 crtfiles with .c version
2012-11-30 10:13:57 -08:00
Elliott Hughes
55210f74c7 am 07c0b73a: Merge "Remove (near-)duplicate definitions of size_t and ssize_t."
* commit '07c0b73a7fc9214e50fc6b9a15a06aeef0506e0f':
  Remove (near-)duplicate definitions of size_t and ssize_t.
2012-11-29 19:43:50 -08:00
Elliott Hughes
c5861c09d5 am 50e62e40: Merge "Bug: __WINT_TYPE__ and wint_t reference to different types"
* commit '50e62e4051c14d7d2a5e07ebe66afd2a81693a26':
  Bug: __WINT_TYPE__ and wint_t reference to different types
2012-11-29 15:58:13 -08:00
The Android Open Source Project
d6d5cbb02a Reconcile with jb-mr1.1-release - do not merge
Change-Id: Ib3c4bb83421360add15219251a389d25f1211497
2012-11-29 15:40:29 -08:00
Elliott Hughes
9ea86011a2 am 99ea84b1: Merge "[MIPS] Fix the MIPS getsid system call"
* commit '99ea84b12dd3ebebbbd527b8748f0d74bb1dfc0d':
  [MIPS] Fix the MIPS getsid system call
2012-11-29 14:51:57 -08:00
Elliott Hughes
852935b68f am 5d4b8421: Merge "Verify architecture neutral syscall numbers"
* commit '5d4b8421b3d040e8c2bdb83c8adb2752eab7b4b6':
  Verify architecture neutral syscall numbers
2012-11-29 14:51:56 -08:00
Elliott Hughes
56eb0bfedb am 78114220: Merge "Define DEFFILEMODE and friends"
* commit '781142208033e72b3773bcfab83cddf5ca34c9b2':
  Define DEFFILEMODE and friends
2012-11-29 14:37:06 -08:00
Naseer Ahmed
ef7eea5538 am 940e4c9d: am f61e5020: msm_mdp: Update header for 4 layer support
* commit '940e4c9d9218ee183c8eb522beb9838c0482084b':
  msm_mdp: Update header for 4 layer support
2012-11-29 14:28:29 -08:00
Naseer Ahmed
5115d4348e msm_mdp: Update header for 4 layer support
Bug: 7626586
Change-Id: I37730470dc09b7a9d6336b1603f87efd98f45e97
Signed-off-by: Iliyan Malchev <malchev@google.com>
2012-11-29 14:00:46 -08:00
Naseer Ahmed
940e4c9d92 am f61e5020: msm_mdp: Update header for 4 layer support
* commit 'f61e50201b7d5e793a479979c304d3388ca1405f':
  msm_mdp: Update header for 4 layer support
2012-11-29 12:40:57 -08:00
Naseer Ahmed
f61e50201b msm_mdp: Update header for 4 layer support
Bug: 7626586
Change-Id: I37730470dc09b7a9d6336b1603f87efd98f45e97
Signed-off-by: Iliyan Malchev <malchev@google.com>
2012-11-29 10:47:35 -08:00
1044 changed files with 33510 additions and 11319 deletions

ABI-bugs.txt (normal file, +15 lines)

@@ -0,0 +1,15 @@
KNOWN ABI BUGS
--------------
time_t is 32-bit. http://b/5819737
off_t is 32-bit. There is off64_t, but no _FILE_OFFSET_BITS support.
sigset_t is too small on ARM and x86 (but correct on MIPS), so support
for real-time signals is broken. http://b/5828899
Too few TLS slots mean we can't allocate 128 pthread_key_t instances,
which POSIX says should be the minimum.
atexit(3) handlers registered by a shared library aren't called on
dlclose(3); this only affects ARM. http://b/4998315
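
As a concrete illustration of the first few items, a minimal stand-alone probe (hypothetical, not part of the tree) can print the sizes in question and count how many pthread keys are actually available:

    /* Minimal probe for the ABI limits listed above; not part of bionic.
     * On an affected 32-bit target it should report 4-byte time_t/off_t
     * and stop well short of the 128 keys POSIX requires. */
    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <time.h>

    int main(void) {
      printf("sizeof(time_t)   = %zu\n", sizeof(time_t));    /* 4 per the note above */
      printf("sizeof(off_t)    = %zu\n", sizeof(off_t));     /* 4; off64_t is separate */
      printf("sizeof(sigset_t) = %zu\n", sizeof(sigset_t));  /* too small on ARM/x86 */

      pthread_key_t keys[128];
      int created = 0;
      while (created < 128 && pthread_key_create(&keys[created], NULL) == 0) {
        ++created;
      }
      printf("pthread keys created: %d (POSIX minimum is 128)\n", created);
      for (int i = 0; i < created; ++i) {
        pthread_key_delete(keys[i]);
      }
      return 0;
    }

The atexit(3)-on-dlclose(3) item needs a shared library to demonstrate, so it is only referenced by its bug link above.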


@@ -6,68 +6,33 @@ include $(LOCAL_PATH)/arch-$(TARGET_ARCH)/syscalls.mk
# =========================================================
libc_common_src_files := \
$(syscall_src) \
unistd/abort.c \
unistd/alarm.c \
unistd/exec.c \
unistd/fnmatch.c \
unistd/getopt_long.c \
unistd/popen.c \
unistd/syslog.c \
unistd/system.c \
unistd/time.c \
stdio/asprintf.c \
stdio/clrerr.c \
stdio/fclose.c \
stdio/fdopen.c \
stdio/feof.c \
stdio/ferror.c \
stdio/fflush.c \
stdio/fgetc.c \
stdio/fgetln.c \
stdio/fgetpos.c \
stdio/fgets.c \
stdio/fileno.c \
stdio/findfp.c \
stdio/flags.c \
stdio/fopen.c \
stdio/fprintf.c \
stdio/fpurge.c \
stdio/fputc.c \
stdio/fputs.c \
stdio/fread.c \
stdio/freopen.c \
stdio/fscanf.c \
stdio/fseek.c \
stdio/fsetpos.c \
stdio/ftell.c \
stdio/funopen.c \
stdio/fvwrite.c \
stdio/fwalk.c \
stdio/fwrite.c \
stdio/getc.c \
stdio/getchar.c \
stdio/gets.c \
stdio/makebuf.c \
stdio/mktemp.c \
stdio/printf.c \
stdio/putc.c \
stdio/putchar.c \
stdio/puts.c \
stdio/putw.c \
stdio/refill.c \
stdio/remove.c \
stdio/rewind.c \
stdio/rget.c \
stdio/scanf.c \
stdio/setbuf.c \
stdio/setbuffer.c \
stdio/setvbuf.c \
stdio/snprintf.c\
stdio/sprintf.c \
stdio/sscanf.c \
stdio/stdio.c \
stdio/tempnam.c \
stdio/tmpnam.c \
stdio/ungetc.c \
stdio/vasprintf.c \
stdio/vfprintf.c \
@@ -78,13 +43,11 @@ libc_common_src_files := \
stdio/vscanf.c \
stdio/vsscanf.c \
stdio/wbuf.c \
stdio/wsetup.c \
stdlib/atexit.c \
stdlib/ctype_.c \
stdlib/exit.c \
stdlib/getenv.c \
stdlib/putenv.c \
stdlib/qsort.c \
stdlib/setenv.c \
stdlib/strtod.c \
stdlib/strtoimax.c \
@@ -95,56 +58,17 @@ libc_common_src_files := \
stdlib/strtoumax.c \
stdlib/tolower_.c \
stdlib/toupper_.c \
string/index.c \
string/strcasecmp.c \
string/strcat.c \
string/strchr.c \
string/strcspn.c \
string/strdup.c \
string/strlcat.c \
string/strlcpy.c \
string/strncat.c \
string/strncpy.c \
string/strpbrk.c \
string/strrchr.c \
string/__strrchr_chk.c \
string/strsep.c \
string/strspn.c \
string/strstr.c \
string/strtok.c \
wchar/wcpcpy.c \
wchar/wcpncpy.c \
wchar/wcscasecmp.c \
wchar/wcscat.c \
wchar/wcschr.c \
wchar/wcscmp.c \
wchar/wcscpy.c \
wchar/wcscspn.c \
wchar/wcsdup.c \
wchar/wcslcat.c \
wchar/wcslcpy.c \
wchar/wcslen.c \
wchar/wcsncasecmp.c \
wchar/wcsncat.c \
wchar/wcsncmp.c \
wchar/wcsncpy.c \
wchar/wcsnlen.c \
wchar/wcspbrk.c \
wchar/wcsrchr.c \
wchar/wcsspn.c \
wchar/wcsstr.c \
wchar/wcstok.c \
wchar/wcswidth.c \
wchar/wcsxfrm.c \
wchar/wmemchr.c \
wchar/wmemcmp.c \
wchar/wmemcpy.c \
wchar/wmemmove.c \
wchar/wmemset.c \
tzcode/asctime.c \
tzcode/difftime.c \
tzcode/localtime.c \
tzcode/strftime.c \
tzcode/strptime.c \
bionic/arc4random.c \
bionic/atoi.c \
bionic/atol.c \
@@ -177,15 +101,10 @@ libc_common_src_files := \
bionic/isatty.c \
bionic/issetugid.c \
bionic/ldexp.c \
bionic/logd_write.c \
bionic/lseek64.c \
bionic/md5.c \
bionic/memchr.c \
bionic/memmem.c \
bionic/memmove_words.c \
bionic/memrchr.c \
bionic/memswap.c \
bionic/mmap.c \
bionic/openat.c \
bionic/open.c \
bionic/pathconf.c \
@@ -196,12 +115,10 @@ libc_common_src_files := \
bionic/ptsname_r.c \
bionic/pututline.c \
bionic/pwrite.c \
bionic/realpath.c \
bionic/reboot.c \
bionic/recv.c \
bionic/sched_cpualloc.c \
bionic/sched_cpucount.c \
bionic/sched_getaffinity.c \
bionic/sched_getcpu.c \
bionic/semaphore.c \
bionic/send.c \
@@ -221,11 +138,11 @@ libc_common_src_files := \
bionic/sleep.c \
bionic/statfs.c \
bionic/strndup.c \
bionic/strnlen.c \
bionic/strntoimax.c \
bionic/strntoumax.c \
bionic/strtotimeval.c \
bionic/system_properties.c \
bionic/system_properties_compat.c \
bionic/tcgetpgrp.c \
bionic/tcsetpgrp.c \
bionic/thread_atexit.c \
@@ -234,7 +151,6 @@ libc_common_src_files := \
bionic/unlockpt.c \
bionic/usleep.c \
bionic/utmp.c \
bionic/wait.c \
bionic/wcscoll.c \
netbsd/gethnamaddr.c \
netbsd/inet/nsap_addr.c \
@@ -266,27 +182,46 @@ libc_common_src_files := \
netbsd/nameser/ns_samedomain.c \
libc_bionic_src_files := \
bionic/abort.cpp \
bionic/assert.cpp \
bionic/brk.cpp \
bionic/debug_format.cpp \
bionic/dirent.cpp \
bionic/eventfd.cpp \
bionic/__errno.c \
bionic/eventfd_read.cpp \
bionic/eventfd_write.cpp \
bionic/__fgets_chk.cpp \
bionic/getauxval.cpp \
bionic/getcwd.cpp \
bionic/libc_init_common.cpp \
bionic/libc_logging.cpp \
bionic/libgen.cpp \
bionic/__memcpy_chk.cpp \
bionic/__memmove_chk.cpp \
bionic/__memset_chk.cpp \
bionic/mmap.cpp \
bionic/pthread_attr.cpp \
bionic/pthread_detach.cpp \
bionic/pthread_equal.cpp \
bionic/pthread_getcpuclockid.cpp \
bionic/pthread_getschedparam.cpp \
bionic/pthread_internals.cpp \
bionic/pthread_join.cpp \
bionic/pthread_kill.cpp \
bionic/pthread_self.cpp \
bionic/pthread_setname_np.cpp \
bionic/pthread_setschedparam.cpp \
bionic/pthread_sigmask.cpp \
bionic/raise.cpp \
bionic/sbrk.cpp \
bionic/scandir.cpp \
bionic/sched_getaffinity.cpp \
bionic/__set_errno.cpp \
bionic/setlocale.cpp \
bionic/signalfd.cpp \
bionic/sigwait.cpp \
bionic/statvfs.cpp \
bionic/__strcat_chk.cpp \
bionic/__strchr_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/strerror.cpp \
bionic/strerror_r.cpp \
@@ -297,13 +232,80 @@ libc_bionic_src_files := \
bionic/__strncpy_chk.cpp \
bionic/strsignal.cpp \
bionic/stubs.cpp \
bionic/sysconf.cpp \
bionic/tdestroy.cpp \
bionic/tmpfile.cpp \
bionic/__umask_chk.cpp \
bionic/__vsnprintf_chk.cpp \
bionic/__vsprintf_chk.cpp \
bionic/wait.cpp \
bionic/wchar.cpp \
libc_tzcode_src_files := \
tzcode/asctime.c \
tzcode/difftime.c \
tzcode/localtime.c \
tzcode/strftime.c \
tzcode/strptime.c \
libc_upstream_freebsd_src_files := \
upstream-freebsd/lib/libc/stdio/clrerr.c \
upstream-freebsd/lib/libc/stdio/fclose.c \
upstream-freebsd/lib/libc/stdio/fdopen.c \
upstream-freebsd/lib/libc/stdio/feof.c \
upstream-freebsd/lib/libc/stdio/ferror.c \
upstream-freebsd/lib/libc/stdio/fgetln.c \
upstream-freebsd/lib/libc/stdio/fgetpos.c \
upstream-freebsd/lib/libc/stdio/fgets.c \
upstream-freebsd/lib/libc/stdio/fileno.c \
upstream-freebsd/lib/libc/stdio/flags.c \
upstream-freebsd/lib/libc/stdio/fopen.c \
upstream-freebsd/lib/libc/stdio/fpurge.c \
upstream-freebsd/lib/libc/stdio/fputs.c \
upstream-freebsd/lib/libc/stdio/fsetpos.c \
upstream-freebsd/lib/libc/stdio/funopen.c \
upstream-freebsd/lib/libc/stdio/fwalk.c \
upstream-freebsd/lib/libc/stdio/fwrite.c \
upstream-freebsd/lib/libc/stdio/getc.c \
upstream-freebsd/lib/libc/stdio/getchar.c \
upstream-freebsd/lib/libc/stdio/makebuf.c \
upstream-freebsd/lib/libc/stdio/mktemp.c \
upstream-freebsd/lib/libc/stdio/putc.c \
upstream-freebsd/lib/libc/stdio/putchar.c \
upstream-freebsd/lib/libc/stdio/puts.c \
upstream-freebsd/lib/libc/stdio/putw.c \
upstream-freebsd/lib/libc/stdio/remove.c \
upstream-freebsd/lib/libc/stdio/rget.c \
upstream-freebsd/lib/libc/stdio/setbuf.c \
upstream-freebsd/lib/libc/stdio/setbuffer.c \
upstream-freebsd/lib/libc/stdio/setvbuf.c \
upstream-freebsd/lib/libc/stdio/tempnam.c \
upstream-freebsd/lib/libc/stdio/tmpnam.c \
upstream-freebsd/lib/libc/stdio/wsetup.c \
upstream-freebsd/lib/libc/stdlib/getopt_long.c \
upstream-freebsd/lib/libc/stdlib/qsort.c \
upstream-freebsd/lib/libc/stdlib/realpath.c \
upstream-freebsd/lib/libc/string/wcpcpy.c \
upstream-freebsd/lib/libc/string/wcpncpy.c \
upstream-freebsd/lib/libc/string/wcscasecmp.c \
upstream-freebsd/lib/libc/string/wcscspn.c \
upstream-freebsd/lib/libc/string/wcsdup.c \
upstream-freebsd/lib/libc/string/wcslcat.c \
upstream-freebsd/lib/libc/string/wcslcpy.c \
upstream-freebsd/lib/libc/string/wcsncasecmp.c \
upstream-freebsd/lib/libc/string/wcsncat.c \
upstream-freebsd/lib/libc/string/wcsncmp.c \
upstream-freebsd/lib/libc/string/wcsncpy.c \
upstream-freebsd/lib/libc/string/wcsnlen.c \
upstream-freebsd/lib/libc/string/wcspbrk.c \
upstream-freebsd/lib/libc/string/wcsspn.c \
upstream-freebsd/lib/libc/string/wcsstr.c \
upstream-freebsd/lib/libc/string/wcstok.c \
upstream-freebsd/lib/libc/string/wmemchr.c \
upstream-freebsd/lib/libc/string/wmemcpy.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
upstream-freebsd/lib/libc/string/wmemset.c \
libc_upstream_netbsd_src_files := \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
upstream-netbsd/common/lib/libc/inet/inet_addr.c \
@@ -311,6 +313,7 @@ libc_upstream_netbsd_src_files := \
upstream-netbsd/libc/gen/ftw.c \
upstream-netbsd/libc/gen/nftw.c \
upstream-netbsd/libc/gen/nice.c \
upstream-netbsd/libc/gen/popen.c \
upstream-netbsd/libc/gen/psignal.c \
upstream-netbsd/libc/gen/setjmperr.c \
upstream-netbsd/libc/gen/utime.c \
@@ -347,54 +350,31 @@ libc_upstream_netbsd_src_files := \
upstream-netbsd/libc/string/strxfrm.c \
upstream-netbsd/libc/unistd/killpg.c \
# The following files are common, but must be compiled
# with different C flags when building a static C library.
#
# The reason for this is the implementation of __get_tls()
# that will differ between the shared and static versions
# of the library.
#
# See comments in private/bionic_tls.h for more details.
#
# NOTE: bionic/pthread.c is added later to this list
# because it needs special handling on ARM, see
# below.
#
libc_static_common_src_files := \
bionic/__errno.c \
bionic/sysconf.cpp \
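
The __get_tls() comment above is easiest to see with errno, which is why bionic/__errno.c is on this list: each thread needs its own errno slot, the slot table is reached through __get_tls(), and that accessor is the part that changes between the static and shared builds. A purely illustrative sketch, with C11 _Thread_local standing in for the real per-thread area and a made-up slot index (not bionic's private/bionic_tls.h):

    /* Illustrative only: errno routed through a per-thread slot table.
     * Real bionic reaches the table via __get_tls(); here the per-thread
     * storage is faked with _Thread_local. */
    #include <stdio.h>

    enum { TLS_SLOT_ERRNO_EXAMPLE = 2 };                /* hypothetical slot index */
    static _Thread_local int tls_slots_example[8];      /* stand-in for the TLS area */

    static int* errno_location_example(void) {          /* roughly what __errno.c does */
      return &tls_slots_example[TLS_SLOT_ERRNO_EXAMPLE];
    }

    int main(void) {
      *errno_location_example() = 22;                   /* e.g. EINVAL */
      printf("this thread's errno slot holds %d\n", *errno_location_example());
      return 0;
    }
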
# Architecture specific source files go here
# =========================================================
ifeq ($(TARGET_ARCH),arm)
libc_common_src_files += \
arch-arm/bionic/abort_arm.S \
arch-arm/bionic/atomics_arm.c \
arch-arm/bionic/clone.S \
arch-arm/bionic/eabi.c \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/ffs.S \
arch-arm/bionic/futex_arm.S \
arch-arm/bionic/__get_sp.S \
arch-arm/bionic/kill.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/memcmp16.S \
arch-arm/bionic/memcmp.S \
arch-arm/bionic/memcpy.S \
arch-arm/bionic/memset.S \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
arch-arm/bionic/strcmp.S \
arch-arm/bionic/strcpy.S \
arch-arm/bionic/strlen.c.arm \
arch-arm/bionic/syscall.S \
arch-arm/bionic/tgkill.S \
arch-arm/bionic/tkill.S \
bionic/memmove.c.arm \
bionic/socketcalls.c \
string/bcopy.c \
string/strncmp.c \
string/strcat.c \
string/strncat.c \
string/strncpy.c \
bionic/strchr.cpp \
string/strrchr.c \
bionic/memchr.c \
bionic/memrchr.c \
string/index.c \
bionic/strnlen.c \
string/strlcat.c \
string/strlcpy.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcscat.c
# These files need to be arm so that gdbserver
# can set breakpoints in them without messing
@@ -407,85 +387,50 @@ libc_common_src_files += \
libc_static_common_src_files += \
bionic/pthread.c.arm \
bionic/pthread_create.cpp.arm \
bionic/pthread_key.cpp.arm \
# these are used by the static and dynamic versions of the libc
# respectively
libc_arch_static_src_files := \
arch-arm/bionic/exidx_static.c
libc_arch_dynamic_src_files := \
arch-arm/bionic/exidx_dynamic.c
endif # arm
ifeq ($(TARGET_ARCH),x86)
libc_common_src_files += \
arch-x86/bionic/__get_sp.S \
arch-x86/bionic/__get_tls.c \
arch-x86/bionic/__set_tls.c \
arch-x86/bionic/clone.S \
arch-x86/bionic/_exit_with_stack_teardown.S \
arch-x86/bionic/futex_x86.S \
arch-x86/bionic/setjmp.S \
arch-x86/bionic/_setjmp.S \
arch-x86/bionic/sigsetjmp.S \
arch-x86/bionic/vfork.S \
arch-x86/bionic/syscall.S \
arch-x86/string/bcopy_wrapper.S \
arch-x86/string/memcpy_wrapper.S \
arch-x86/string/memmove_wrapper.S \
arch-x86/string/bzero_wrapper.S \
arch-x86/string/memcmp_wrapper.S \
arch-x86/string/memset_wrapper.S \
arch-x86/string/strcmp_wrapper.S \
arch-x86/string/strncmp_wrapper.S \
arch-x86/string/strlen_wrapper.S \
string/strcpy.c \
bionic/pthread-atfork.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
bionic/ptrace.c
bionic/pthread-atfork.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
bionic/ptrace.c \
libc_static_common_src_files += \
bionic/pthread.c \
bionic/pthread_create.cpp \
bionic/pthread_key.cpp \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.c
libc_arch_dynamic_src_files :=
endif # x86
ifeq ($(TARGET_ARCH),mips)
libc_common_src_files += \
arch-mips/bionic/__get_sp.S \
arch-mips/bionic/__get_tls.c \
arch-mips/bionic/__set_tls.c \
arch-mips/bionic/_exit_with_stack_teardown.S \
arch-mips/bionic/_setjmp.S \
arch-mips/bionic/futex_mips.S \
arch-mips/bionic/bzero.S \
arch-mips/bionic/cacheflush.c \
arch-mips/bionic/clone.S \
arch-mips/bionic/ffs.S \
arch-mips/bionic/memcmp16.S \
arch-mips/bionic/memmove.c \
arch-mips/bionic/pipe.S \
arch-mips/bionic/setjmp.S \
arch-mips/bionic/sigsetjmp.S \
arch-mips/bionic/vfork.S
libc_common_src_files += \
arch-mips/string/memset.S \
arch-mips/string/memcpy.S \
arch-mips/string/mips_strlen.c
libc_common_src_files += \
bionic/memcmp.c \
string/bcopy.c \
string/strcmp.c \
string/strcpy.c \
string/strncmp.c
string/strncmp.c \
string/strcat.c \
string/strncat.c \
string/strncpy.c \
bionic/strchr.cpp \
string/strrchr.c \
bionic/memchr.c \
bionic/memrchr.c \
string/index.c \
bionic/strnlen.c \
string/strlcat.c \
string/strlcpy.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcscat.c
libc_common_src_files += \
bionic/pthread-atfork.c \
@@ -495,14 +440,45 @@ libc_common_src_files += \
libc_static_common_src_files += \
bionic/pthread.c \
bionic/pthread_create.cpp \
bionic/pthread_key.cpp \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.c
libc_arch_dynamic_src_files :=
endif # mips
ifeq ($(strip $(TARGET_CPU_VARIANT)),)
$(warning TARGET_CPU_VARIANT is not defined)
endif
###########################################################
## Add cpu specific source files.
##
## This can be called multiple times, but it will only add
## the first source file for each unique $(1).
## $(1): Unique identifier to identify the cpu variant
## implementation.
## $(2): Cpu specific source file.
###########################################################
define libc-add-cpu-variant-src
$(if $(filter true,$(_LIBC_ARCH_CPU_VARIANT_HAS_$(1))), \
, \
$(eval _LIBC_ARCH_CPU_VARIANT_HAS_$(1) := true) \
$(eval _LIBC_ARCH_CPU_VARIANT_SRC_FILE.$(1) := $(2)) \
$(eval _LIBC_ARCH_CPU_VARIANT_SRC_FILES += $(2)) \
)
endef
_LIBC_ARCH_COMMON_SRC_FILES :=
_LIBC_ARCH_CPU_VARIANT_SRC_FILES :=
_LIBC_ARCH_STATIC_SRC_FILES :=
_LIBC_ARCH_DYNAMIC_SRC_FILES :=
include bionic/libc/arch-$(TARGET_ARCH)/$(TARGET_ARCH).mk
libc_common_src_files += $(_LIBC_ARCH_COMMON_SRC_FILES)
libc_common_src_files += $(_LIBC_ARCH_CPU_VARIANT_SRC_FILES)
libc_arch_static_src_files := $(_LIBC_ARCH_STATIC_SRC_FILES)
libc_arch_dynamic_src_files := $(_LIBC_ARCH_DYNAMIC_SRC_FILES)
# Define some common cflags
# ========================================================
libc_common_cflags := \
@@ -516,14 +492,6 @@ libc_common_cflags := \
-DLOG_ON_HEAP_ERROR \
-Wall -Wextra
# these macro definitions are required to implement the
# 'timezone' and 'daylight' global variables, as well as
# properly update the 'tm_gmtoff' field in 'struct tm'.
#
libc_common_cflags += \
-DTM_GMTOFF=tm_gmtoff \
-DUSG_COMPAT=1
ifeq ($(strip $(DEBUG_BIONIC_LIBC)),true)
libc_common_cflags += -DDEBUG
endif
@@ -539,27 +507,6 @@ ifeq ($(TARGET_ARCH),arm)
libc_common_cflags += -DSOFTFLOAT
libc_common_cflags += -fstrict-aliasing
libc_crt_target_cflags := -mthumb-interwork
#
# Define HAVE_ARM_TLS_REGISTER macro to indicate to the C library
# that it should access the hardware TLS register directly in
# private/bionic_tls.h
#
# The value must match your kernel configuration
#
ifeq ($(ARCH_ARM_HAVE_TLS_REGISTER),true)
libc_common_cflags += -DHAVE_ARM_TLS_REGISTER
endif
#
# Define HAVE_32_BYTE_CACHE_LINES to indicate to C
# library it should use to 32-byte version of memcpy, and not
# the 64-byte version.
#
ifeq ($(ARCH_ARM_HAVE_32_BYTE_CACHE_LINES),true)
libc_common_cflags += -DHAVE_32_BYTE_CACHE_LINE
endif
ifeq ($(ARCH_ARM_USE_NON_NEON_MEMCPY),true)
libc_common_cflags += -DARCH_ARM_USE_NON_NEON_MEMCPY
endif
endif # !arm
ifeq ($(TARGET_ARCH),x86)
@@ -622,23 +569,17 @@ libc_crt_target_cflags += \
# static C++ destructors are properly called on dlclose().
#
ifeq ($(TARGET_ARCH),arm)
libc_crtbegin_extension := c
libc_crt_target_so_cflags :=
endif
ifeq ($(TARGET_ARCH),mips)
libc_crtbegin_extension := S
libc_crt_target_so_cflags := -fPIC
endif
ifeq ($(TARGET_ARCH),x86)
libc_crtbegin_extension := c
libc_crt_target_so_cflags := -fPIC
endif
ifeq ($(libc_crtbegin_extension),)
$(error $(TARGET_ARCH) not supported)
endif
libc_crt_target_so_cflags += $(libc_crt_target_cflags)
libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin.$(libc_crtbegin_extension)
libc_crt_target_crtbegin_so_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin_so.$(libc_crtbegin_extension)
libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin.c
libc_crt_target_crtbegin_so_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin_so.c
# See the comment in crtbrand.c for the reason why we need to generate
# crtbrand.s before generating crtbrand.o.
@@ -744,8 +685,8 @@ WITH_MALLOC_CHECK_LIBC_A := $(strip $(WITH_MALLOC_CHECK_LIBC_A))
include $(CLEAR_VARS)
LOCAL_SRC_FILES := bionic/ssp.cpp
LOCAL_CFLAGS := $(libc_common_cflags) -fno-stack-protector
LOCAL_SRC_FILES := bionic/__stack_chk_fail.cpp
LOCAL_CFLAGS := $(libc_common_cflags) -fno-stack-protector -Werror
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libbionic_ssp
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -754,6 +695,51 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_tzcode.a - upstream 'tzcode' code
# ========================================================
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_tzcode_src_files)
LOCAL_CFLAGS := \
$(libc_common_cflags) \
-std=gnu99 \
-DSTD_INSPIRED=1 \
-DTZDIR=\"/system/usr/share/zoneinfo\" \
-DTM_GMTOFF=tm_gmtoff \
-DUSG_COMPAT=1
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_tzcode
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_SYSTEM_SHARED_LIBRARIES :=
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_freebsd.a - upstream FreeBSD C library code
# ========================================================
#
# These files are built with the freebsd-compat.h header file
# automatically included.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_upstream_freebsd_src_files)
LOCAL_CFLAGS := \
$(libc_common_cflags) \
-I$(LOCAL_PATH)/upstream-freebsd \
-I$(LOCAL_PATH)/upstream-freebsd/libc/include \
-include upstream-freebsd/freebsd-compat.h
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_freebsd
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_SYSTEM_SHARED_LIBRARIES :=
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_netbsd.a - upstream NetBSD C library code
# ========================================================
@@ -806,7 +792,12 @@ LOCAL_CFLAGS := $(libc_common_cflags) \
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_common
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_WHOLE_STATIC_LIBRARIES := libbionic_ssp libc_bionic libc_netbsd
LOCAL_WHOLE_STATIC_LIBRARIES := \
libbionic_ssp \
libc_bionic \
libc_freebsd \
libc_netbsd \
libc_tzcode
LOCAL_SYSTEM_SHARED_LIBRARIES :=
include $(BUILD_STATIC_LIBRARY)
@@ -817,10 +808,10 @@ include $(BUILD_STATIC_LIBRARY)
# ========================================================
#
# This is a version of the static C library that does not
# include malloc. It's useful in situations when calling
# the user wants to provide their own malloc implementation,
# or wants to explicitly disallow the use of the use of malloc,
# like the dynamic loader.
# include malloc. It's useful in situations when the user wants
# to provide their own malloc implementation, or wants to
# explicitly disallow the use of the use of malloc,
# such as in the dynamic loader.
include $(CLEAR_VARS)


@@ -1673,38 +1673,6 @@ SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 1990, 1993
The Regents of the University of California. All rights reserved.
This code is derived from software contributed to Berkeley by
Chris Torek.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
4. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 1990, 1993
The Regents of the University of California. All rights reserved.
@@ -2852,7 +2820,7 @@ are met:
2. Redistributions in binary form must reproduce the above copyright
notices, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
4. Neither the name of the University nor the names of its contributors
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
@@ -3092,13 +3060,6 @@ are met:
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the NetBSD
Foundation, Inc. and its contributors.
4. Neither the name of The NetBSD Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
@@ -3916,6 +3877,36 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2010, 2011 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2010, Intel Corporation
All rights reserved.
@@ -3946,6 +3937,36 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2011 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2011 The Android Open Source Project
Copyright (c) 2008 ARM Ltd
All rights reserved.
@@ -3975,6 +3996,36 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2011, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2011, VMware, Inc.
All rights reserved.
@@ -4030,6 +4081,66 @@ SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2013 ARM Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c)1999 Citrus Project,
All rights reserved.


@@ -1,54 +0,0 @@
Welcome to Bionic, Android's small and custom C library for the Android
platform.
Bionic is mainly a port of the BSD C library to our Linux kernel with the
following additions/changes:
- no support for locales
- no support for wide chars (i.e. multi-byte characters)
- its own smallish implementation of pthreads based on Linux futexes
- support for x86, ARM and ARM thumb CPU instruction sets and kernel interfaces
Bionic is released under the standard 3-clause BSD License
Bionic doesn't want to implement all features of a traditional C library, we only
add features to it as we need them, and we try to keep things as simple and small
as possible. Our goal is not to support scaling to thousands of concurrent threads
on multi-processors machines; we're running this on cell-phones, damnit !!
Note that Bionic doesn't provide a libthread_db or a libm implementation.
Adding new syscalls:
====================
Bionic provides the gensyscalls.py Python script to automatically generate syscall
stubs from the list defined in the file SYSCALLS.TXT. You can thus add a new syscall
by doing the following:
- edit SYSCALLS.TXT
- add a new line describing your syscall, it should look like:
return_type syscall_name(parameters) syscall_number
- in the event where you want to differentiate the syscall function from its entry name,
use the alternate:
return_type funcname:syscall_name(parameters) syscall_number
- additionally, if the syscall number is different between ARM and x86, use:
return_type funcname[:syscall_name](parameters) arm_number,x86_number
- a syscall number can be -1 to indicate that the syscall is not implemented on
a given platform, for example:
void __set_tls(void*) arm_number,-1
the comments in SYSCALLS.TXT contain more information about the line format
You can also use the 'checksyscalls.py' script to check that all the syscall
numbers you entered are correct. It does so by looking at the values defined in
your Linux kernel headers. The script indicates where the values are incorrect
and what is expected instead.
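
For a sense of what the generated stubs boil down to, here is a minimal hand-written C equivalent built on the generic syscall(2) wrapper, using the gettid entry from SYSCALLS.TXT as its example; the function name is hypothetical, and the real stubs are per-architecture assembler emitted by gensyscalls.py:

    /* Hand-written equivalent of a generated stub, for illustration only.
     * SYSCALLS.TXT declares:  pid_t gettid() 224,224,222   (arm,x86,mips)
     * The generated assembler loads that number, traps into the kernel, and
     * turns a negative return into errno; syscall(2) does the same here. */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pid_t gettid_example(void) {
      return (pid_t) syscall(__NR_gettid);
    }

    int main(void) {
      printf("gettid() via raw syscall: %ld\n", (long) gettid_example());
      return 0;
    }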


@@ -1,13 +1,11 @@
# this file is used to list all the syscalls that will be supported by
# the Bionic C library. It is used to automatically generate the syscall
# stubs, the list of syscall constants (__NR_xxxx) and the content of <linux/_unistd.h>
# This file is used to automatically generate bionic's the system calls stubs.
#
# each non comment line has the following format:
# Each non comment line has the following format:
#
# return_type func_name[:syscall_name[:call_id]]([parameter_list]) (syscall_number|"stub")
# return_type func_name[:syscall_name[:call_id]]([parameter_list]) (1|-1|"stub")
#
# note that:
# - syscall_name correspond to the name of the syscall, which may differ from
# Note that:
# - syscall_name corresponds to the name of the syscall, which may differ from
# the exported function name (example: the exit syscall is implemented by the _exit()
# function, which is not the same as the standard C exit() function which calls it)
# The call_id parameter, given that func_name and syscall_name have
@@ -18,299 +16,311 @@
# - each parameter type is assumed to be stored on 32 bits, there is no plan to support
# 64-bit architectures at the moment
#
# - if there is "stub" instead of a syscall number, the tool will not generate any
# assembler template for the syscall; it's up to the bionic implementation to provide
# a relevant C stub
# - the final field can be "1", meaning: generate a stub for each architecture,
# taking the constants from the kernel header files.
#
# - additionally, if the syscall number is different among ARM, x86, and MIPS, use:
# return_type funcname[:syscall_name](parameters) arm_number,x86_number,mips_number
# - the final field can be "stub" meaning: do not generate any stubs ---
# in this case, a hand-written custom stub must be provided.
# TODO: replace this with something like "custom" or "none", or remove
# it entirely.
#
# the file is processed by a python script named gensyscalls.py
# - the final field can be a three-element list of 1s and -1s, meaning:
# this system call is only available on some of the architectures (1),
# and no stub should be generated for those architectures marked with -1.
# the order is arm,x86,mips.
# TODO: replace this with something more readable like "-arm,-mips" (meaning x86 only).
#
# This file is processed by a python script named gensyscalls.py.
#
# process management
void _exit:exit_group (int) 248,252,246
void _exit:exit_group (int) 1
void _exit_thread:exit (int) 1
pid_t __fork:fork (void) 2
pid_t _waitpid:waitpid (pid_t, int*, int, struct rusage*) -1,7,7
int __waitid:waitid(int, pid_t, struct siginfo_t*, int,void*) 280,284,278
pid_t __fork:fork (void) 1
pid_t _waitpid:waitpid (pid_t, int*, int, struct rusage*) -1,1,1
int __waitid:waitid(int, pid_t, struct siginfo_t*, int,void*) 1
pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage) 1
# NOTE: this system call is never called directly, but we list it there
# to have __NR_clone properly defined.
#
pid_t __sys_clone:clone (int, void*, int*, void*, int*) 120
pid_t __sys_clone:clone (int, void*, int*, void*, int*) 1
int execve (const char*, char* const*, char* const*) 11
int execve (const char*, char* const*, char* const*) 1
int __setuid:setuid32 (uid_t) 213,213,-1
int __setuid:setuid (uid_t) -1,-1,23
uid_t getuid:getuid32 () 199,199,-1
uid_t getuid:getuid () -1,-1,24
gid_t getgid:getgid32 () 200,200,-1
gid_t getgid:getgid () -1,-1,47
uid_t geteuid:geteuid32 () 201,201,-1
uid_t geteuid:geteuid () -1,-1,49
gid_t getegid:getegid32 () 202,202,-1
gid_t getegid:getegid () -1,-1,50
uid_t getresuid:getresuid32 (uid_t *ruid, uid_t *euid, uid_t *suid) 209,209,-1
uid_t getresuid:getresuid (uid_t *ruid, uid_t *euid, uid_t *suid) -1,-1,186
gid_t getresgid:getresgid32 (gid_t *rgid, gid_t *egid, gid_t *sgid) 211,211,-1
gid_t getresgid:getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid) -1,-1,191
pid_t gettid() 224,224,222
ssize_t readahead(int, off64_t, size_t) 225,225,223
int getgroups:getgroups32(int, gid_t *) 205,205,-1
int getgroups:getgroups(int, gid_t *) -1,-1,80
pid_t getpgid(pid_t) 132
pid_t getppid() 64
pid_t getsid(pid_t) 147,147,151
pid_t setsid() 66
int setgid:setgid32(gid_t) 214,214,-1
int setgid:setgid(gid_t) -1,-1,46
int __setuid:setuid32 (uid_t) 1,1,-1
int __setuid:setuid (uid_t) -1,-1,1
uid_t getuid:getuid32 () 1,1,-1
uid_t getuid:getuid () -1,-1,1
gid_t getgid:getgid32 () 1,1,-1
gid_t getgid:getgid () -1,-1,1
uid_t geteuid:geteuid32 () 1,1,-1
uid_t geteuid:geteuid () -1,-1,1
gid_t getegid:getegid32 () 1,1,-1
gid_t getegid:getegid () -1,-1,1
uid_t getresuid:getresuid32 (uid_t *ruid, uid_t *euid, uid_t *suid) 1,1,-1
uid_t getresuid:getresuid (uid_t *ruid, uid_t *euid, uid_t *suid) -1,-1,1
gid_t getresgid:getresgid32 (gid_t *rgid, gid_t *egid, gid_t *sgid) 1,1,-1
gid_t getresgid:getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid) -1,-1,1
pid_t gettid() 1
ssize_t readahead(int, off64_t, size_t) 1
int getgroups:getgroups32(int, gid_t *) 1,1,-1
int getgroups:getgroups(int, gid_t *) -1,-1,1
pid_t getpgid(pid_t) 1
pid_t getppid() 1
pid_t getsid(pid_t) 1
pid_t setsid() 1
int setgid:setgid32(gid_t) 1,1,-1
int setgid:setgid(gid_t) -1,-1,1
int seteuid:seteuid32(uid_t) stub
int __setreuid:setreuid32(uid_t, uid_t) 203,203,-1
int __setreuid:setreuid(uid_t, uid_t) -1,-1,70
int __setresuid:setresuid32(uid_t, uid_t, uid_t) 208,208,-1
int __setresuid:setresuid(uid_t, uid_t, uid_t) -1,-1,185
int setresgid:setresgid32(gid_t, gid_t, gid_t) 210,210,-1
int setresgid:setresgid(gid_t, gid_t, gid_t) -1,-1,190
void* __brk:brk(void*) 45
int __setreuid:setreuid32(uid_t, uid_t) 1,1,-1
int __setreuid:setreuid(uid_t, uid_t) -1,-1,1
int __setresuid:setresuid32(uid_t, uid_t, uid_t) 1,1,-1
int __setresuid:setresuid(uid_t, uid_t, uid_t) -1,-1,1
int setresgid:setresgid32(gid_t, gid_t, gid_t) 1,1,-1
int setresgid:setresgid(gid_t, gid_t, gid_t) -1,-1,1
void* __brk:brk(void*) 1
# see comments in arch-arm/bionic/kill.S to understand why we don't generate an ARM stub for kill/tkill
int kill(pid_t, int) -1,37,37
int tkill(pid_t tid, int sig) -1,238,236
int tgkill(pid_t tgid, pid_t tid, int sig) -1,270,266
int __ptrace:ptrace(int request, int pid, void* addr, void* data) 26
int __set_thread_area:set_thread_area(void* user_desc) -1,243,283
int __getpriority:getpriority(int, int) 96
int setpriority(int, int, int) 97
int setrlimit(int resource, const struct rlimit *rlp) 75
int getrlimit:ugetrlimit(int resource, struct rlimit *rlp) 191,191,-1
int getrlimit:getrlimit(int resource, struct rlimit *rlp) -1,-1,76
int getrusage(int who, struct rusage* r_usage) 77
int setgroups:setgroups32(int, const gid_t *) 206,206,-1
int setgroups:setgroups(int, const gid_t *) -1,-1,81
int kill(pid_t, int) -1,1,1
int tkill(pid_t tid, int sig) -1,1,1
int tgkill(pid_t tgid, pid_t tid, int sig) -1,1,1
int __ptrace:ptrace(int request, int pid, void* addr, void* data) 1
int __set_thread_area:set_thread_area(void* user_desc) -1,1,1
int __getpriority:getpriority(int, int) 1
int setpriority(int, int, int) 1
int setrlimit(int resource, const struct rlimit *rlp) 1
int getrlimit:ugetrlimit(int resource, struct rlimit *rlp) 1,1,-1
int getrlimit:getrlimit(int resource, struct rlimit *rlp) -1,-1,1
int getrusage(int who, struct rusage* r_usage) 1
int setgroups:setgroups32(int, const gid_t *) 1,1,-1
int setgroups:setgroups(int, const gid_t *) -1,-1,1
pid_t getpgrp(void) stub
int setpgid(pid_t, pid_t) 57
pid_t vfork(void) 190,-1,-1
int setregid:setregid32(gid_t, gid_t) 204,204,-1
int setregid:setregid(gid_t, gid_t) -1,-1,71
int chroot(const char *) 61
int setpgid(pid_t, pid_t) 1
pid_t vfork(void) 1,-1,-1
int setregid:setregid32(gid_t, gid_t) 1,1,-1
int setregid:setregid(gid_t, gid_t) -1,-1,1
int chroot(const char *) 1
# IMPORTANT: Even though <sys/prctl.h> declares prctl(int,...), the syscall stub must take 6 arguments
# to match the kernel implementation.
int prctl(int option, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5) 172,172,192
int capget(cap_user_header_t header, cap_user_data_t data) 184,184,204
int capset(cap_user_header_t header, const cap_user_data_t data) 185,185,205
int sigaltstack(const stack_t*, stack_t*) 186,186,206
int acct(const char* filepath) 51
int prctl(int option, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5) 1
int capget(cap_user_header_t header, cap_user_data_t data) 1
int capset(cap_user_header_t header, const cap_user_data_t data) 1
int sigaltstack(const stack_t*, stack_t*) 1
int acct(const char* filepath) 1
# file descriptors
ssize_t read (int, void*, size_t) 3
ssize_t write (int, const void*, size_t) 4
ssize_t pread64 (int, void *, size_t, off64_t) 180,180,200
ssize_t pwrite64 (int, void *, size_t, off64_t) 181,181,201
int __open:open (const char*, int, mode_t) 5
int __openat:openat (int, const char*, int, mode_t) 322,295,288
int close (int) 6
ssize_t read (int, void*, size_t) 1
ssize_t write (int, const void*, size_t) 1
ssize_t pread64 (int, void *, size_t, off64_t) 1
ssize_t pwrite64 (int, void *, size_t, off64_t) 1
int __open:open (const char*, int, mode_t) 1
int __openat:openat (int, const char*, int, mode_t) 1
int close (int) 1
int creat(const char*, mode_t) stub
off_t lseek(int, off_t, int) 19
int __llseek:_llseek (int, unsigned long, unsigned long, loff_t*, int) 140
pid_t getpid () 20
off_t lseek(int, off_t, int) 1
int __llseek:_llseek (int, unsigned long, unsigned long, loff_t*, int) 1
pid_t getpid () 1
void * mmap(void *, size_t, int, int, int, long) stub
void * __mmap2:mmap2(void*, size_t, int, int, int, long) 192,192,210
int munmap(void *, size_t) 91
void * mremap(void *, size_t, size_t, unsigned long) 163,163,167
int msync(const void *, size_t, int) 144
int mprotect(const void *, size_t, int) 125
int madvise(const void *, size_t, int) 220,219,218
int mlock(const void *addr, size_t len) 150,150,154
int munlock(const void *addr, size_t len) 151,151,155
int mlockall(int flags) 152,152,156
int munlockall() 153,153,157
int mincore(void* start, size_t length, unsigned char* vec) 219,218,217
int __ioctl:ioctl(int, int, void *) 54
int readv(int, const struct iovec *, int) 145
int writev(int, const struct iovec *, int) 146
int __fcntl:fcntl(int, int, void*) 55
int flock(int, int) 143
int fchmod(int, mode_t) 94
int dup(int) 41
int pipe(int *) 42,42,-1
int pipe2(int *, int) 359,331,328
int dup2(int, int) 63
int select:_newselect(int, struct fd_set *, struct fd_set *, struct fd_set *, struct timeval *) 142
int ftruncate(int, off_t) 93
int ftruncate64(int, off64_t) 194,194,212
int getdents:getdents64(unsigned int, struct dirent *, unsigned int) 217,220,219
int fsync(int) 118
int fdatasync(int) 148,148,152
int fchown:fchown32(int, uid_t, gid_t) 207,207,-1
int fchown:fchown(int, uid_t, gid_t) -1,-1,95
void sync(void) 36
int __fcntl64:fcntl64(int, int, void *) 221,221,220
int __fstatfs64:fstatfs64(int, size_t, struct statfs *) 267,269,256
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count) 187,187,207
int fstatat:fstatat64(int dirfd, const char *path, struct stat *buf, int flags) 327,300,293
int mkdirat(int dirfd, const char *pathname, mode_t mode) 323,296,289
int fchownat(int dirfd, const char *path, uid_t owner, gid_t group, int flags) 325,298,291
int fchmodat(int dirfd, const char *path, mode_t mode, int flags) 333,306,299
int renameat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath) 329,302,295
int fsetxattr(int, const char *, const void *, size_t, int) 228,228,226
ssize_t fgetxattr(int, const char *, void *, size_t) 231,231,229
ssize_t flistxattr(int, char *, size_t) 234,234,232
int fremovexattr(int, const char *) 237,237,235
void * __mmap2:mmap2(void*, size_t, int, int, int, long) 1
int munmap(void *, size_t) 1
void * mremap(void *, size_t, size_t, unsigned long) 1
int msync(const void *, size_t, int) 1
int mprotect(const void *, size_t, int) 1
int madvise(const void *, size_t, int) 1
int mlock(const void *addr, size_t len) 1
int munlock(const void *addr, size_t len) 1
int mlockall(int flags) 1
int munlockall() 1
int mincore(void* start, size_t length, unsigned char* vec) 1
int __ioctl:ioctl(int, int, void *) 1
int readv(int, const struct iovec *, int) 1
int writev(int, const struct iovec *, int) 1
int __fcntl:fcntl(int, int, void*) 1
int flock(int, int) 1
int fchmod(int, mode_t) 1
int dup(int) 1
int pipe(int *) 1,1,-1
int pipe2(int *, int) 1
int dup2(int, int) 1
int select:_newselect(int, struct fd_set *, struct fd_set *, struct fd_set *, struct timeval *) 1
int ftruncate(int, off_t) 1
int ftruncate64(int, off64_t) 1
int getdents:getdents64(unsigned int, struct dirent *, unsigned int) 1
int fsync(int) 1
int fdatasync(int) 1
int fchown:fchown32(int, uid_t, gid_t) 1,1,-1
int fchown:fchown(int, uid_t, gid_t) -1,-1,1
void sync(void) 1
int __fcntl64:fcntl64(int, int, void *) 1
int __fstatfs64:fstatfs64(int, size_t, struct statfs *) 1
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count) 1
int fstatat:fstatat64(int dirfd, const char *path, struct stat *buf, int flags) 1
int mkdirat(int dirfd, const char *pathname, mode_t mode) 1
int fchownat(int dirfd, const char *path, uid_t owner, gid_t group, int flags) 1
int fchmodat(int dirfd, const char *path, mode_t mode, int flags) 1
int renameat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath) 1
int fsetxattr(int, const char *, const void *, size_t, int) 1
ssize_t fgetxattr(int, const char *, void *, size_t) 1
ssize_t flistxattr(int, char *, size_t) 1
int fremovexattr(int, const char *) 1
# file system
int link (const char*, const char*) 9
int unlink (const char*) 10
int unlinkat (int, const char *, int) 328,301,294
int chdir (const char*) 12
int mknod (const char*, mode_t, dev_t) 14
int chmod (const char*,mode_t) 15
int chown:chown32(const char *, uid_t, gid_t) 212,212,-1
int chown:chown(const char *, uid_t, gid_t) -1,-1,202
int lchown:lchown32 (const char*, uid_t, gid_t) 198,198,-1
int lchown:lchown (const char*, uid_t, gid_t) -1,-1,16
int mount (const char*, const char*, const char*, unsigned long, const void*) 21
int link (const char*, const char*) 1
int unlink (const char*) 1
int unlinkat (int, const char *, int) 1
int chdir (const char*) 1
int mknod (const char*, mode_t, dev_t) 1
int chmod (const char*,mode_t) 1
int chown:chown32(const char *, uid_t, gid_t) 1,1,-1
int chown:chown(const char *, uid_t, gid_t) -1,-1,1
int lchown:lchown32 (const char*, uid_t, gid_t) 1,1,-1
int lchown:lchown (const char*, uid_t, gid_t) -1,-1,1
int mount (const char*, const char*, const char*, unsigned long, const void*) 1
int umount(const char*) stub
int umount2 (const char*, int) 52
int fstat:fstat64(int, struct stat*) 197,197,215
int stat:stat64(const char *, struct stat *) 195,195,213
int lstat:lstat64(const char *, struct stat *) 196,196,214
int mkdir(const char *, mode_t) 39
int readlink(const char *, char *, size_t) 85
int rmdir(const char *) 40
int rename(const char *, const char *) 38
int __getcwd:getcwd(char * buf, size_t size) 183,183,203
int access(const char *, int) 33
int faccessat(int, const char *, int, int) 334,307,300
int symlink(const char *, const char *) 83
int fchdir(int) 133
int truncate(const char*, off_t) 92
int setxattr(const char *, const char *, const void *, size_t, int) 226,226,224
int lsetxattr(const char *, const char *, const void *, size_t, int) 227,227,225
ssize_t getxattr(const char *, const char *, void *, size_t) 229,229,227
ssize_t lgetxattr(const char *, const char *, void *, size_t) 230,230,228
ssize_t listxattr(const char *, char *, size_t) 232,232,230
ssize_t llistxattr(const char *, char *, size_t) 233,233,231
int removexattr(const char *, const char *) 235,235,233
int lremovexattr(const char *, const char *) 236,236,234
int __statfs64:statfs64(const char *, size_t, struct statfs *) 266,268,255
long unshare(unsigned long) 337,310,303
int umount2 (const char*, int) 1
int fstat:fstat64(int, struct stat*) 1
int stat:stat64(const char *, struct stat *) 1
int lstat:lstat64(const char *, struct stat *) 1
int mkdir(const char *, mode_t) 1
int readlink(const char *, char *, size_t) 1
int rmdir(const char *) 1
int rename(const char *, const char *) 1
int __getcwd:getcwd(char * buf, size_t size) 1
int access(const char *, int) 1
int faccessat(int, const char *, int, int) 1
int symlink(const char *, const char *) 1
int fchdir(int) 1
int truncate(const char*, off_t) 1
int setxattr(const char *, const char *, const void *, size_t, int) 1
int lsetxattr(const char *, const char *, const void *, size_t, int) 1
ssize_t getxattr(const char *, const char *, void *, size_t) 1
ssize_t lgetxattr(const char *, const char *, void *, size_t) 1
ssize_t listxattr(const char *, char *, size_t) 1
ssize_t llistxattr(const char *, char *, size_t) 1
int removexattr(const char *, const char *) 1
int lremovexattr(const char *, const char *) 1
int __statfs64:statfs64(const char *, size_t, struct statfs *) 1
long unshare(unsigned long) 1
int swapon(const char *, int) 1
int swapoff(const char *) 1
# time
int pause () 29
int gettimeofday(struct timeval*, struct timezone*) 78
int settimeofday(const struct timeval*, const struct timezone*) 79
clock_t times(struct tms *) 43
int nanosleep(const struct timespec *, struct timespec *) 162,162,166
int clock_gettime(clockid_t clk_id, struct timespec *tp) 263,265,263
int clock_settime(clockid_t clk_id, const struct timespec *tp) 262,264,262
int clock_getres(clockid_t clk_id, struct timespec *res) 264,266,264
int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *req, struct timespec *rem) 265,267,265
int getitimer(int, const struct itimerval *) 105
int setitimer(int, const struct itimerval *, struct itimerval *) 104
int __timer_create:timer_create(clockid_t clockid, struct sigevent *evp, timer_t *timerid) 257,259,257
int __timer_settime:timer_settime(timer_t, int, const struct itimerspec*, struct itimerspec*) 258,260,258
int __timer_gettime:timer_gettime(timer_t, struct itimerspec*) 259,261,259
int __timer_getoverrun:timer_getoverrun(timer_t) 260,262,260
int __timer_delete:timer_delete(timer_t) 261,263,261
int utimes(const char*, const struct timeval tvp[2]) 269,271,267
int utimensat(int, const char *, const struct timespec times[2], int) 348,320,316
int pause () 1
int gettimeofday(struct timeval*, struct timezone*) 1
int settimeofday(const struct timeval*, const struct timezone*) 1
clock_t times(struct tms *) 1
int nanosleep(const struct timespec *, struct timespec *) 1
int clock_gettime(clockid_t clk_id, struct timespec *tp) 1
int clock_settime(clockid_t clk_id, const struct timespec *tp) 1
int clock_getres(clockid_t clk_id, struct timespec *res) 1
int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *req, struct timespec *rem) 1
int getitimer(int, const struct itimerval *) 1
int setitimer(int, const struct itimerval *, struct itimerval *) 1
int __timer_create:timer_create(clockid_t clockid, struct sigevent *evp, timer_t *timerid) 1
int __timer_settime:timer_settime(timer_t, int, const struct itimerspec*, struct itimerspec*) 1
int __timer_gettime:timer_gettime(timer_t, struct itimerspec*) 1
int __timer_getoverrun:timer_getoverrun(timer_t) 1
int __timer_delete:timer_delete(timer_t) 1
int utimes(const char*, const struct timeval tvp[2]) 1
int utimensat(int, const char *, const struct timespec times[2], int) 1
int timerfd_create(clockid_t, int) 1
int timerfd_settime(int, int, const struct itimerspec *, struct itimerspec *) 1
int timerfd_gettime(int, struct itimerspec *) 1
# signals
int sigaction(int, const struct sigaction *, struct sigaction *) 67
int sigprocmask(int, const sigset_t *, sigset_t *) 126
int __sigsuspend:sigsuspend(int unused1, int unused2, unsigned mask) 72,72,-1
int __sigsuspend:sigsuspend(const sigset_t *mask) -1,-1,72
int __rt_sigaction:rt_sigaction (int sig, const struct sigaction *act, struct sigaction *oact, size_t sigsetsize) 174,174,194
int __rt_sigprocmask:rt_sigprocmask (int how, const sigset_t *set, sigset_t *oset, size_t sigsetsize) 175,175,195
int __rt_sigtimedwait:rt_sigtimedwait(const sigset_t *set, struct siginfo_t *info, struct timespec_t *timeout, size_t sigset_size) 177,177,197
int sigpending(sigset_t *) 73
int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) 355,327,324
int sigaction(int, const struct sigaction *, struct sigaction *) 1
int sigprocmask(int, const sigset_t *, sigset_t *) 1
int __sigsuspend:sigsuspend(int unused1, int unused2, unsigned mask) 1,1,-1
int __sigsuspend:sigsuspend(const sigset_t *mask) -1,-1,1
int __rt_sigaction:rt_sigaction (int sig, const struct sigaction *act, struct sigaction *oact, size_t sigsetsize) 1
int __rt_sigprocmask:rt_sigprocmask (int how, const sigset_t *set, sigset_t *oset, size_t sigsetsize) 1
int __rt_sigtimedwait:rt_sigtimedwait(const sigset_t *set, struct siginfo_t *info, struct timespec_t *timeout, size_t sigset_size) 1
int sigpending(sigset_t *) 1
int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) 1
# sockets
int socket(int, int, int) 281,-1,183
int socketpair(int, int, int, int*) 288,-1,184
int bind(int, struct sockaddr *, int) 282,-1,169
int connect(int, struct sockaddr *, socklen_t) 283,-1,170
int listen(int, int) 284,-1,174
int accept(int, struct sockaddr *, socklen_t *) 285,-1,168
int getsockname(int, struct sockaddr *, socklen_t *) 286,-1,172
int getpeername(int, struct sockaddr *, socklen_t *) 287,-1,171
int sendto(int, const void *, size_t, int, const struct sockaddr *, socklen_t) 290,-1,180
int recvfrom(int, void *, size_t, unsigned int, struct sockaddr *, socklen_t *) 292,-1,176
int shutdown(int, int) 293,-1,182
int setsockopt(int, int, int, const void *, socklen_t) 294,-1,181
int getsockopt(int, int, int, void *, socklen_t *) 295,-1,173
int sendmsg(int, const struct msghdr *, unsigned int) 296,-1,179
int recvmsg(int, struct msghdr *, unsigned int) 297,-1,177
int socket(int, int, int) 1,-1,1
int socketpair(int, int, int, int*) 1,-1,1
int bind(int, struct sockaddr *, int) 1,-1,1
int connect(int, struct sockaddr *, socklen_t) 1,-1,1
int listen(int, int) 1,-1,1
int accept(int, struct sockaddr *, socklen_t *) 1,-1,1
int getsockname(int, struct sockaddr *, socklen_t *) 1,-1,1
int getpeername(int, struct sockaddr *, socklen_t *) 1,-1,1
int sendto(int, const void *, size_t, int, const struct sockaddr *, socklen_t) 1,-1,1
int recvfrom(int, void *, size_t, unsigned int, struct sockaddr *, socklen_t *) 1,-1,1
int shutdown(int, int) 1,-1,1
int setsockopt(int, int, int, const void *, socklen_t) 1,-1,1
int getsockopt(int, int, int, void *, socklen_t *) 1,-1,1
int sendmsg(int, const struct msghdr *, unsigned int) 1,-1,1
int recvmsg(int, struct msghdr *, unsigned int) 1,-1,1
# sockets for x86. These are done as an "indexed" call to socketcall syscall.
int socket:socketcall:1 (int, int, int) -1,102,-1
int bind:socketcall:2 (int, struct sockaddr *, int) -1,102,-1
int connect:socketcall:3(int, struct sockaddr *, socklen_t) -1,102,-1
int listen:socketcall:4(int, int) -1,102,-1
int accept:socketcall:5(int, struct sockaddr *, socklen_t *) -1,102,-1
int getsockname:socketcall:6(int, struct sockaddr *, socklen_t *) -1,102,-1
int getpeername:socketcall:7(int, struct sockaddr *, socklen_t *) -1,102,-1
int socketpair:socketcall:8(int, int, int, int*) -1,102,-1
int sendto:socketcall:11(int, const void *, size_t, int, const struct sockaddr *, socklen_t) -1,102,-1
int recvfrom:socketcall:12(int, void *, size_t, unsigned int, struct sockaddr *, socklen_t *) -1,102,-1
int shutdown:socketcall:13(int, int) -1,102,-1
int setsockopt:socketcall:14(int, int, int, const void *, socklen_t) -1,102,-1
int getsockopt:socketcall:15(int, int, int, void *, socklen_t *) -1,102,-1
int sendmsg:socketcall:16(int, const struct msghdr *, unsigned int) -1,102,-1
int recvmsg:socketcall:17(int, struct msghdr *, unsigned int) -1,102,-1
int socket:socketcall:1 (int, int, int) -1,1,-1
int bind:socketcall:2 (int, struct sockaddr *, int) -1,1,-1
int connect:socketcall:3(int, struct sockaddr *, socklen_t) -1,1,-1
int listen:socketcall:4(int, int) -1,1,-1
int accept:socketcall:5(int, struct sockaddr *, socklen_t *) -1,1,-1
int getsockname:socketcall:6(int, struct sockaddr *, socklen_t *) -1,1,-1
int getpeername:socketcall:7(int, struct sockaddr *, socklen_t *) -1,1,-1
int socketpair:socketcall:8(int, int, int, int*) -1,1,-1
int sendto:socketcall:11(int, const void *, size_t, int, const struct sockaddr *, socklen_t) -1,1,-1
int recvfrom:socketcall:12(int, void *, size_t, unsigned int, struct sockaddr *, socklen_t *) -1,1,-1
int shutdown:socketcall:13(int, int) -1,1,-1
int setsockopt:socketcall:14(int, int, int, const void *, socklen_t) -1,1,-1
int getsockopt:socketcall:15(int, int, int, void *, socklen_t *) -1,1,-1
int sendmsg:socketcall:16(int, const struct msghdr *, unsigned int) -1,1,-1
int recvmsg:socketcall:17(int, struct msghdr *, unsigned int) -1,1,-1
# scheduler & real-time
int sched_setscheduler(pid_t pid, int policy, const struct sched_param *param) 156,156,160
int sched_getscheduler(pid_t pid) 157,157,161
int sched_yield(void) 158,158,162
int sched_setparam(pid_t pid, const struct sched_param *param) 154,154,158
int sched_getparam(pid_t pid, struct sched_param *param) 155,155,159
int sched_get_priority_max(int policy) 159,159,163
int sched_get_priority_min(int policy) 160,160,164
int sched_rr_get_interval(pid_t pid, struct timespec *interval) 161,161,165
int sched_setaffinity(pid_t pid, size_t setsize, const cpu_set_t* set) 241,241,239
int __sched_getaffinity:sched_getaffinity(pid_t pid, size_t setsize, cpu_set_t* set) 242,242,240
int __getcpu:getcpu(unsigned *cpu, unsigned *node, void *unused) 345,318,312
int sched_setscheduler(pid_t pid, int policy, const struct sched_param *param) 1
int sched_getscheduler(pid_t pid) 1
int sched_yield(void) 1
int sched_setparam(pid_t pid, const struct sched_param *param) 1
int sched_getparam(pid_t pid, struct sched_param *param) 1
int sched_get_priority_max(int policy) 1
int sched_get_priority_min(int policy) 1
int sched_rr_get_interval(pid_t pid, struct timespec *interval) 1
int sched_setaffinity(pid_t pid, size_t setsize, const cpu_set_t* set) 1
int __sched_getaffinity:sched_getaffinity(pid_t pid, size_t setsize, cpu_set_t* set) 1
int __getcpu:getcpu(unsigned *cpu, unsigned *node, void *unused) 1
# io priorities
int ioprio_set(int which, int who, int ioprio) 314,289,314
int ioprio_get(int which, int who) 315,290,315
int ioprio_set(int which, int who, int ioprio) 1
int ioprio_get(int which, int who) 1
# other
int uname(struct utsname *) 122
pid_t __wait4:wait4(pid_t pid, int *status, int options, struct rusage *rusage) 114
mode_t umask(mode_t) 60
int __reboot:reboot(int, int, int, void *) 88
int __syslog:syslog(int, char *, int) 103
int init_module(void *, unsigned long, const char *) 128
int delete_module(const char*, unsigned int) 129
int klogctl:syslog(int, char *, int) 103
int sysinfo(struct sysinfo *) 116
int personality(unsigned long) 136
long perf_event_open(struct perf_event_attr *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags) 364,336,333
int uname(struct utsname *) 1
mode_t umask(mode_t) 1
int __reboot:reboot(int, int, int, void *) 1
int __syslog:syslog(int, char *, int) 1
int init_module(void *, unsigned long, const char *) 1
int delete_module(const char*, unsigned int) 1
int klogctl:syslog(int, char *, int) 1
int sysinfo(struct sysinfo *) 1
int personality(unsigned long) 1
long perf_event_open(struct perf_event_attr *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags) 1
# futex
int futex(void *, int, int, void *, void *, int) 240,240,238
int futex(void *, int, int, void *, void *, int) 1
# epoll
int epoll_create(int size) 250,254,248
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) 251,255,249
int epoll_wait(int epfd, struct epoll_event *events, int max, int timeout) 252,256,250
int epoll_create(int size) 1
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) 1
int epoll_wait(int epfd, struct epoll_event *events, int max, int timeout) 1
int inotify_init(void) 316,291,284
int inotify_add_watch(int, const char *, unsigned int) 317,292,285
int inotify_rm_watch(int, unsigned int) 318,293,286
int inotify_init(void) 1
int inotify_add_watch(int, const char *, unsigned int) 1
int inotify_rm_watch(int, unsigned int) 1
int poll(struct pollfd *, unsigned int, long) 168,168,188
int poll(struct pollfd *, unsigned int, long) 1
int eventfd:eventfd2(unsigned int, int) 356,328,325
int eventfd:eventfd2(unsigned int, int) 1
# ARM-specific ARM_NR_BASE == 0x0f0000 == 983040
int __set_tls:ARM_set_tls(void*) 983045,-1,-1
int cacheflush:ARM_cacheflush(long start, long end, long flags) 983042,-1,-1
int __set_tls:__ARM_NR_set_tls(void*) 1,-1,-1
int cacheflush:__ARM_NR_cacheflush(long start, long end, long flags) 1,-1,-1
# MIPS-specific
int _flush_cache:cacheflush(char *addr, const int nbytes, const int op) -1,-1,147
int syscall(int number,...) -1,-1,0
int _flush_cache:cacheflush(char *addr, const int nbytes, const int op) -1,-1,1
int syscall(int number,...) -1,-1,1
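The new final-field convention described in the header comments at the top of this file (a plain "1", a per-architecture list of 1s and -1s, or "stub") can be summarised with a short Python sketch. This is a hedged illustration of the rule as stated, not code from gensyscalls.py:

def stub_targets(final_field, arches=("arm", "x86", "mips")):
    # "stub": no stub is generated at all; a hand-written one must be provided.
    if final_field == "stub":
        return set()
    values = [int(v) for v in final_field.split(",")]
    if len(values) == 1:
        values = values * len(arches)   # "1" means: a stub for every architecture
    # -1 marks architectures for which no stub should be generated.
    return {arch for arch, v in zip(arches, values) if v == 1}

print(stub_targets("1"))        # all three architectures
print(stub_targets("-1,1,1"))   # the kill/tkill case: x86 and mips only
print(stub_targets("stub"))     # set(): hand-written stub expected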

libc/arch-arm/arm.mk Normal file
View File

@@ -0,0 +1,34 @@
_LIBC_ARCH_COMMON_SRC_FILES := \
arch-arm/bionic/abort_arm.S \
arch-arm/bionic/atomics_arm.c \
arch-arm/bionic/clone.S \
arch-arm/bionic/eabi.c \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/ffs.S \
arch-arm/bionic/futex_arm.S \
arch-arm/bionic/__get_sp.S \
arch-arm/bionic/kill.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/memcmp16.S \
arch-arm/bionic/memcmp.S \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
arch-arm/bionic/strcpy.S \
arch-arm/bionic/syscall.S \
arch-arm/bionic/tgkill.S \
arch-arm/bionic/tkill.S \
# These are used by the static and dynamic versions of the libc
# respectively.
_LIBC_ARCH_STATIC_SRC_FILES := \
arch-arm/bionic/exidx_static.c
_LIBC_ARCH_DYNAMIC_SRC_FILES := \
arch-arm/bionic/exidx_dynamic.c
ifeq ($(strip $(wildcard bionic/libc/arch-arm/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk)),)
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a7, cortex-a8, cortex-a9, cortex-a15, krait. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
endif
include bionic/libc/arch-arm/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk

View File

@@ -27,7 +27,7 @@
*/
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
#include <asm/unistd.h>
// void _exit_with_stack_teardown(void* stackBase, int stackSize, int retCode)
ENTRY(_exit_with_stack_teardown)

View File

@@ -38,5 +38,5 @@
ENTRY(abort)
.save {r3, r14}
stmfd sp!, {r3, r14}
blx PIC_SYM(_C_LABEL(__libc_android_abort), PLT)
bl PIC_SYM(_C_LABEL(__libc_android_abort), PLT)
END(abort)

View File

@@ -27,7 +27,7 @@
*/
#include <sys/types.h>
#include <private/logd.h>
#include <private/libc_logging.h>
#include <stdio.h>
/*
@@ -52,7 +52,7 @@ atexit(void (*func)(void))
*/
static char const warning[] = "WARNING: generic atexit() called from legacy shared library\n";
__libc_android_log_write(ANDROID_LOG_WARN, "libc", warning);
__libc_format_log(ANDROID_LOG_WARN, "libc", warning);
fprintf(stderr, warning);
return (__cxa_atexit((void (*)(void *))func, NULL, NULL));

View File

@@ -26,36 +26,39 @@
* SUCH DAMAGE.
*/
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
#include <asm/unistd.h>
// int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
// int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
ENTRY(__pthread_clone)
# Copy the args onto the new stack.
stmdb r1!, {r0, r3}
# Push 'fn' and 'arg' onto 'child_stack'.
stmdb r1!, {r0, r3}
# The sys_clone system call only takes two arguments: 'flags' and 'child_stack'.
# 'child_stack' is already in r1, but we need to move 'flags' into position.
mov r0, r2
stmfd sp!, {r4, r7}
# System call.
mov ip, r7
ldr r7, =__NR_clone
swi #0
# Child?
movs r0, r0
beq 1f
# In parent, reload saved registers then either exit or set errno.
ldmfd sp!, {r4, r7}
bxne lr
b __set_syscall_errno
# Parent.
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
1: # The child.
# pick the function arg and call address off the stack and jump
# to the C __thread_entry function which does some setup and then
# calls the thread's start function
1: # Child.
# Pop 'fn' and 'arg' back off the stack and call __thread_entry.
pop {r0, r1}
# __thread_entry needs the TLS pointer
# __thread_entry also needs our stack pointer.
mov r2, sp
b __thread_entry
END(__pthread_clone)
@@ -94,10 +97,12 @@ ENTRY(__bionic_clone)
movs r0, r0
beq 1f
# In the parent, reload saved registers then either exit or set errno.
# In the parent, reload saved registers then either return or set errno.
ldmfd sp!, {r4, r5, r6, r7}
bxne lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
1: # The child.
ldr r0, [sp, #-4]

View File

@@ -31,55 +31,11 @@
#include <machine/asm.h>
#include <machine/cpu-features.h>
/*
* ffs - find first set bit, this algorithm isolates the first set
* bit, then multiplies the number by 0x0450fbaf which leaves the top
* 6 bits as an index into the table. This algorithm should be a win
* over the checking each bit in turn as per the C compiled version.
*
* Some newer ARM architectures have an instruction named
* CLZ (count leading Zero's) that is used
*
* This is the ffs algorithm devised by d.seal and posted to comp.sys.arm on
* 16 Feb 1994.
*/
ENTRY(ffs)
/* Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry */
rsb r1, r0, #0
ands r0, r0, r1
#ifndef __ARM_HAVE_CLZ
/*
* now r0 has at most one set bit, call this X
* if X = 0, all further instructions are skipped
*/
adrne r2, .L_ffs_table
orrne r0, r0, r0, lsl #4 /* r0 = X * 0x11 */
orrne r0, r0, r0, lsl #6 /* r0 = X * 0x451 */
rsbne r0, r0, r0, lsl #16 /* r0 = X * 0x0450fbaf */
/* now lookup in table indexed on top 6 bits of r0 */
ldrneb r0, [ r2, r0, lsr #26 ]
bx lr
END(ffs)
.text;
.type .L_ffs_table, _ASM_TYPE_OBJECT;
.L_ffs_table:
/* 0 1 2 3 4 5 6 7 */
.byte 0, 1, 2, 13, 3, 7, 0, 14 /* 0- 7 */
.byte 4, 0, 8, 0, 0, 0, 0, 15 /* 8-15 */
.byte 11, 5, 0, 0, 9, 0, 0, 26 /* 16-23 */
.byte 0, 0, 0, 0, 0, 22, 28, 16 /* 24-31 */
.byte 32, 12, 6, 0, 0, 0, 0, 0 /* 32-39 */
.byte 10, 0, 0, 25, 0, 0, 21, 27 /* 40-47 */
.byte 31, 0, 0, 0, 0, 24, 0, 20 /* 48-55 */
.byte 30, 0, 23, 19, 29, 18, 17, 0 /* 56-63 */
#else /* !defined(__ARM_HAVE_CLZ) */
clzne r0, r0
rsbne r0, r0, #32
bx lr
END(ffs)
#endif /* !defined(__ARM_HAVE_CLZ) */
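The deleted comment block above describes what ffs computes: the 1-based index of the least significant set bit, with 0 for a zero argument. A tiny Python model of that contract (an illustration of the behaviour, not a drop-in for the assembly) is:

def ffs(x):
    x &= 0xFFFFFFFF              # the assembly operates on a 32-bit register
    lowest = x & -x              # isolate the lowest set bit (the rsb/ands trick above)
    return lowest.bit_length()   # 0 when x == 0, otherwise the 1-based bit index

print(ffs(0), ffs(1), ffs(0x80000000))   # -> 0 1 32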

View File

@@ -26,7 +26,7 @@
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
#include <asm/unistd.h>
#include <machine/asm.h>
#define FUTEX_WAIT 0
@@ -34,11 +34,10 @@
// __futex_syscall3(*ftx, op, val)
ENTRY(__futex_syscall3)
stmdb sp!, {r4, r7}
.save {r4, r7}
mov ip, r7
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
mov r7, ip
bx lr
END(__futex_syscall3)
@@ -49,25 +48,23 @@ END(__futex_syscall4)
// __futex_wait(*ftx, val, *timespec)
ENTRY(__futex_wait)
stmdb sp!, {r4, r7}
.save {r4, r7}
mov ip, r7
mov r3, r2
mov r2, r1
mov r1, #FUTEX_WAIT
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
mov r7, ip
bx lr
END(__futex_wait)
// __futex_wake(*ftx, counter)
ENTRY(__futex_wake)
.save {r4, r7}
stmdb sp!, {r4, r7}
mov ip, r7
mov r2, r1
mov r1, #FUTEX_WAKE
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
mov r7, ip
bx lr
END(__futex_wake)

View File

@@ -25,6 +25,11 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/err.h>
#include <asm/unistd.h>
#include <machine/asm.h>
/* unlike our auto-generated syscall stubs, this code saves lr
on the stack, as well as a few other registers. this makes
our stack unwinder happy, when we generate debug stack
@@ -32,19 +37,14 @@
abort due to a fatal runtime error (e.g. detection
of a corrupted malloc heap).
*/
#include <sys/linux-syscalls.h>
#include <machine/asm.h>
#ifndef __NR_kill
#define __NR_kill 37
#endif
ENTRY(kill)
stmfd sp!, {r4-r7, ip, lr}
ldr r7, =__NR_kill
swi #0
ldmfd sp!, {r4-r7, ip, lr}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(kill)
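The new epilogue shown above (cmn r0, #(MAX_ERRNO + 1) / bxls lr / neg r0, r0 / b __set_errno) implements the usual Linux convention that a raw syscall return value in the range [-MAX_ERRNO, -1] encodes an error. A rough Python model of that check, assuming the conventional MAX_ERRNO value of 4095 (the real constant comes from the <linux/err.h> header included above), is:

MAX_ERRNO = 4095   # assumed value, not taken from the diff

def wrap_kernel_return(raw):
    # Failure: the kernel returned -errno; report errno and the C-level -1.
    if -MAX_ERRNO <= raw < 0:
        return -1, -raw        # what 'neg r0, r0; b __set_errno' arranges
    # Success: anything else, including large values such as valid pointers, passes through.
    return raw, 0

print(wrap_kernel_return(-22))    # -> (-1, 22), i.e. EINVAL
print(wrap_kernel_return(12345))  # -> (12345, 0)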

View File

@@ -115,7 +115,7 @@ ENTRY(memcmp)
* pointer somewhere else
*/
mov r4, r0
/* align first pointer to word boundary
* offset = -src & 3
*/
@@ -151,7 +151,7 @@ ENTRY(memcmp)
ldr ip, [r1]
subs r2, r2, #(32 + 4)
bmi 1f
0: pld [r4, #(CACHE_LINE_SIZE * 2)]
pld [r1, #(CACHE_LINE_SIZE * 2)]
ldr r0, [r4], #4
@@ -178,14 +178,14 @@ ENTRY(memcmp)
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
bne 2f
bne 2f
subs r2, r2, #32
bhs 0b
/* do we have at least 4 bytes left? */
1: adds r2, r2, #(32 - 4 + 4)
bmi 4f
/* finish off 4 bytes at a time */
3: ldr r0, [r4], #4
ldr ip, [r1], #4
@@ -233,17 +233,14 @@ ENTRY(memcmp)
subs r2, r2, #1
bne 11b
bx lr
END(memcmp)
5: /*************** non-congruent case ***************/
and r0, r1, #3
and r0, r1, #3
cmp r0, #2
bne 4f
/* here, offset is 2 (16-bits aligned, special cased) */
/* make sure we have at least 16 bytes to process */
subs r2, r2, #16
addmi r2, r2, #16
@@ -341,3 +338,4 @@ END(memcmp)
mov r2, #4
ldmfd sp!, {r5, r6, r7}
b 8b
END(memcmp)

View File

@@ -0,0 +1,406 @@
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Prototype: void *memcpy (void *dst, const void *src, size_t count). */
/* Use the version of memcpy implemented using LDRD and STRD.
This version is tuned for Cortex-A15.
This might not be the best for other ARMv7-A CPUs,
but there is no predefine to distinguish between
different CPUs in the same architecture,
and this version is better than the plain memcpy provided in newlib.
Therefore, we use this version for all ARMv7-A CPUS. */
/* To make the same code compile for both ARM and Thumb instruction
sets, switch to unified syntax at the beginning of this function.
However, by using the same code, we may be missing optimization
opportunities. For instance, in LDRD/STRD instructions, the first
destination register must be even and the second consecutive in
ARM state, but not in Thumb state. */
#include <machine/cpu-features.h>
#include <machine/asm.h>
.syntax unified
ENTRY(memcpy)
/* Assumes that n >= 0, and dst, src are valid pointers.
If there is at least 8 bytes to copy, use LDRD/STRD.
If src and dst are misaligned with different offsets,
first copy byte by byte until dst is aligned,
and then copy using LDRD/STRD and shift if needed.
When less than 8 left, copy a word and then byte by byte. */
/* Save registers (r0 holds the return value):
optimized push {r0, r4, r5, r6, r7, lr}.
To try and improve performance, stack layout changed,
i.e., not keeping the stack looking like users expect
(highest numbered register at highest address). */
.save {r0, lr}
push {r0, lr}
.save {r4, r5}
strd r4, r5, [sp, #-8]!
.save {r6, r7}
strd r6, r7, [sp, #-8]!
/* TODO: Add debug frame directives.
We don't need exception unwind directives, because the code below
does not throw any exceptions and does not call any other functions.
Generally, newlib functions like this lack debug information for
assembler source. */
/* Get copying of tiny blocks out of the way first. */
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Check word alignment. */
ands ip, r0, #3 /* ip = last 2 bits of dst. */
bne dst_not_word_aligned /* If dst is not word-aligned. */
/* Get here if dst is word-aligned. */
ands ip, r1, #3 /* ip = last 2 bits of src. */
bne src_not_word_aligned /* If src is not word-aligned. */
word_aligned:
/* Get here if source and dst both are word-aligned.
The number of bytes remaining to copy is r2+4. */
/* Is there at least 64 bytes to copy? */
subs r2, r2, #60
blt copy_less_than_64 /* If r2 + 4 < 64. */
/* First, align the destination buffer to 8-bytes,
to make sure double loads and stores don't cross cache line boundary,
as they are then more expensive even if the data is in the cache
(require two load/store issue cycles instead of one).
If only one of the buffers is not 8-bytes aligned,
then it's more important to align dst than src,
because there is more penalty for stores
than loads that cross cacheline boundary.
This check and realignment are only worth doing
if there is a lot to copy. */
/* Get here if dst is word aligned,
i.e., the 2 least significant bits are 0.
If dst is not 2w aligned (i.e., the 3rd bit is not set in dst),
then copy 1 word (4 bytes). */
ands r3, r0, #4
beq 11f /* If dst already two-word aligned. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
blt copy_less_than_64
11:
/* TODO: Align to cacheline (useful for PLD optimization). */
/* Every loop iteration copies 64 bytes. */
1:
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 1b /* If there is more to copy. */
copy_less_than_64:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Restore the count if there is more than 7 bytes to copy. */
adds r2, r2, #56
blt copy_less_than_8
/* Copy 8 bytes at a time. */
2:
ldrd r4, r5, [r1], #8
strd r4, r5, [r0], #8
subs r2, r2, #8
bge 2b /* If there is more to copy. */
copy_less_than_8:
/* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
Check if there is more to copy. */
cmn r2, #8
beq return /* If r2 + 8 == 0. */
/* Restore the count if there is more than 3 bytes to copy. */
adds r2, r2, #4
blt copy_less_than_4
/* Copy 4 bytes. */
ldr r3, [r1], #4
str r3, [r0], #4
copy_less_than_4:
/* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
/* Restore the count, check if there is more to copy. */
adds r2, r2, #4
beq return /* If r2 == 0. */
/* Get here with r2 is in {1,2,3}={01,10,11}. */
/* Logical shift left r2, insert 0s, update flags. */
lsls r2, r2, #31
/* Copy byte by byte.
Condition ne means the last bit of r2 is 0.
Condition cs means the second to last bit of r2 is set,
i.e., r2 is 1 or 3. */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
itttt cs
ldrbcs r4, [r1], #1
ldrbcs r5, [r1]
strbcs r4, [r0], #1
strbcs r5, [r0]
return:
/* Restore registers: optimized pop {r0, r4, r5, r6, r7, pc} */
/* This is the only return point of memcpy. */
ldrd r6, r7, [sp], #8
ldrd r4, r5, [sp], #8
pop {r0, pc}
#ifndef __ARM_FEATURE_UNALIGNED
/* The following assembly macro implements misaligned copy in software.
Assumes that dst is word aligned, src is at offset "pull" bits from
word, push = 32 - pull, and the number of bytes that remain to copy
is r2 + 4, r2 >= 0. */
/* In the code below, r2 is the number of bytes that remain to be
written. The number of bytes read is always larger, because we have
partial words in the shift queue. */
.macro miscopy pull push shiftleft shiftright
/* Align src to the previous word boundary. */
bic r1, r1, #3
/* Initialize the shift queue. */
ldr r5, [r1], #4 /* Load a word from source. */
subs r2, r2, #4
blt 6f /* Go to misaligned copy of less than 8 bytes. */
/* Get here if there is more than 8 bytes to copy.
The number of bytes to copy is r2+8, r2 >= 0. */
subs r2, r2, #56
blt 4f /* Go to misaligned copy of less than 64 bytes. */
3:
/* Get here if there is more than 64 bytes to copy.
The number of bytes to copy is r2+64, r2 >= 0. */
/* Copy 64 bytes in every iteration.
Use a partial word from the shift queue. */
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
mov r6, r5, \shiftleft #\pull
ldrd r4, r5, [r1, \offset]
orr r6, r6, r4, \shiftright #\push
mov r7, r4, \shiftleft #\pull
orr r7, r7, r5, \shiftright #\push
strd r6, r7, [r0, \offset]
.endr
add r1, r1, #64
add r0, r0, #64
subs r2, r2, #64
bge 3b
4:
/* Get here if there is less than 64 bytes to copy (-64 <= r2 < 0)
and they are misaligned. */
/* Restore the count if there is more than 7 bytes to copy. */
adds r2, r2, #56
blt 6f /* Go to misaligned copy of less than 8 bytes. */
5:
/* Copy 8 bytes at a time.
Use a partial word from the shift queue. */
mov r6, r5, \shiftleft #\pull
ldrd r4, r5, [r1], #8
orr r6, r6, r4, \shiftright #\push
mov r7, r4, \shiftleft #\pull
orr r7, r7, r5, \shiftright #\push
strd r6, r7, [r0], #8
subs r2, r2, #8
bge 5b /* If there is more to copy. */
6:
/* Get here if there less than 8 bytes to copy (-8 <= r2 < 0)
and they are misaligned. */
/* Check if there is more to copy. */
cmn r2, #8
beq return
/* Check if there is less than 4 bytes to copy. */
cmn r2, #4
itt lt
/* Restore src offset from word-align. */
sublt r1, r1, #(\push / 8)
blt copy_less_than_4
/* Use a partial word from the shift queue. */
mov r3, r5, \shiftleft #\pull
/* Load a word from src, but without writeback
(this word is not fully written to dst). */
ldr r5, [r1]
/* Restore src offset from word-align. */
add r1, r1, #(\pull / 8)
/* Shift bytes to create one dst word and store it. */
orr r3, r3, r5, \shiftright #\push
str r3, [r0], #4
/* Use single byte copying of the remaining bytes. */
b copy_less_than_4
.endm
#endif /* not __ARM_FEATURE_UNALIGNED */
dst_not_word_aligned:
/* Get here when dst is not aligned and ip has the last 2 bits of dst,
i.e., ip is the offset of dst from word.
The number of bytes that remains to copy is r2 + 4,
i.e., there are at least 4 bytes to copy.
Write a partial word (0 to 3 bytes), such that dst becomes
word-aligned. */
/* If dst is at ip bytes offset from a word (with 0 < ip < 4),
then there are (4 - ip) bytes to fill up to align dst to the next
word. */
rsb ip, ip, #4 /* ip = #4 - ip. */
cmp ip, #2
/* Copy byte by byte with conditionals. */
itt gt
ldrbgt r3, [r1], #1
strbgt r3, [r0], #1
itt ge
ldrbge r4, [r1], #1
strbge r4, [r0], #1
ldrb lr, [r1], #1
strb lr, [r0], #1
/* Update the count.
ip holds the number of bytes we have just copied. */
subs r2, r2, ip /* r2 = r2 - ip. */
blt copy_less_than_4 /* If r2 < ip. */
/* Get here if there are more than 4 bytes to copy.
Check if src is aligned. If beforehand src and dst were not word
aligned but congruent (same offset), then now they are both
word-aligned, and we can copy the rest efficiently (without
shifting). */
ands ip, r1, #3 /* ip = last 2 bits of src. */
beq word_aligned /* If r1 is word-aligned. */
src_not_word_aligned:
/* Get here when src is not word-aligned, but dst is word-aligned.
The number of bytes that remains to copy is r2+4. */
#ifdef __ARM_FEATURE_UNALIGNED
/* Copy word by word using LDR when alignment can be done in hardware,
i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
subs r2, r2, #60
blt 8f
7:
/* Copy 64 bytes in every loop iteration. */
.irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
ldr r3, [r1, \offset]
str r3, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 7b
8:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Check if there is more than 3 bytes to copy. */
adds r2, r2, #60
blt copy_less_than_4
9:
/* Get here if there is less than 64 but at least 4 bytes to copy,
where the number of bytes to copy is r2+4. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
bge 9b
b copy_less_than_4
#else /* not __ARM_FEATURE_UNALIGNED */
/* ip has last 2 bits of src,
i.e., ip is the offset of src from word, and ip > 0.
Compute shifts needed to copy from src to dst. */
cmp ip, #2
beq miscopy_16_16 /* If ip == 2. */
bge miscopy_24_8 /* If ip == 3. */
/* Get here if ip == 1. */
/* Endian independent macros for shifting bytes within registers. */
#ifndef __ARMEB__
miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsr shiftright=lsl
miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsr shiftright=lsl
miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsr shiftright=lsl
#else /* not __ARMEB__ */
miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsl shiftright=lsr
miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsl shiftright=lsr
miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsl shiftright=lsr
#endif /* not __ARMEB__ */
#endif /* not __ARM_FEATURE_UNALIGNED */
END(memcpy)
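The strategy spelled out in the comments at the top of this file (copy byte by byte until dst is word-aligned, move 8 bytes at a time with LDRD/STRD, then finish the tail) can be modelled in a few lines of Python. This is only a sketch of the control flow under those assumptions, not a faithful translation of the assembly:

def memcpy_model(dst, dst_off, src, src_off, n):
    # Byte by byte until dst is word-aligned (the dst_not_word_aligned path).
    while n > 0 and (dst_off & 3) != 0:
        dst[dst_off] = src[src_off]
        dst_off += 1; src_off += 1; n -= 1
    # Bulk copy in 8-byte chunks (the LDRD/STRD loops).
    while n >= 8:
        dst[dst_off:dst_off + 8] = src[src_off:src_off + 8]
        dst_off += 8; src_off += 8; n -= 8
    # Tail of 0..7 bytes (copy_less_than_8 / copy_less_than_4).
    dst[dst_off:dst_off + n] = src[src_off:src_off + n]

buf = bytearray(32)
memcpy_model(buf, 1, b"hello, cortex-a15 memcpy!", 0, 25)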

View File

@@ -0,0 +1,614 @@
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This memcpy routine is optimised for Cortex-A15 cores and takes advantage
of VFP or NEON when built with the appropriate flags.
Assumptions:
ARMv6 (ARMv7-a if using Neon)
ARM state
Unaligned accesses
LDRD/STRD support unaligned word accesses
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
.syntax unified
/* This implementation requires ARM state. */
.arm
#ifdef __ARM_NEON__
.fpu neon
.arch armv7-a
# define FRAME_SIZE 4
# define USE_VFP
# define USE_NEON
#elif !defined (__SOFTFP__)
.arch armv6
.fpu vfpv2
# define FRAME_SIZE 32
# define USE_VFP
#else
.arch armv6
# define FRAME_SIZE 32
#endif
/* Old versions of GAS incorrectly implement the NEON align semantics. */
#ifdef BROKEN_ASM_NEON_ALIGN
#define ALIGN(addr, align) addr,:align
#else
#define ALIGN(addr, align) addr:align
#endif
#define PC_OFFSET 8 /* PC pipeline compensation. */
#define INSN_SIZE 4
/* Call parameters. */
#define dstin r0
#define src r1
#define count r2
/* Locals. */
#define tmp1 r3
#define dst ip
#define tmp2 r10
#ifndef USE_NEON
/* For bulk copies using GP registers. */
#define A_l r2 /* Call-clobbered. */
#define A_h r3 /* Call-clobbered. */
#define B_l r4
#define B_h r5
#define C_l r6
#define C_h r7
#define D_l r8
#define D_h r9
#endif
/* Number of lines ahead to pre-fetch data. If you change this the code
below will need adjustment to compensate. */
#define prefetch_lines 5
#ifdef USE_VFP
.macro cpy_line_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vldr \vreg, [src, #\base + prefetch_lines * 64 - 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
.macro cpy_tail_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
#endif
.p2align 6
ENTRY(memcpy)
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
bge .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
.Ltail63unaligned:
#ifdef USE_NEON
and tmp1, count, #0x38
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vld1.8 {d0}, [src]! /* 14 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 12 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 10 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 8 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 6 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 4 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 2 words to go. */
vst1.8 {d0}, [dst]!
tst count, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
#else
/* Copy up to 15 full words of data. May not be aligned. */
/* Cannot use VFP for unaligned data. */
and tmp1, count, #0x3c
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
/* Jump directly into the sequence below at the correct offset. */
add pc, pc, tmp1, lsl #1
ldr tmp1, [src, #-60] /* 15 words to go. */
str tmp1, [dst, #-60]
ldr tmp1, [src, #-56] /* 14 words to go. */
str tmp1, [dst, #-56]
ldr tmp1, [src, #-52]
str tmp1, [dst, #-52]
ldr tmp1, [src, #-48] /* 12 words to go. */
str tmp1, [dst, #-48]
ldr tmp1, [src, #-44]
str tmp1, [dst, #-44]
ldr tmp1, [src, #-40] /* 10 words to go. */
str tmp1, [dst, #-40]
ldr tmp1, [src, #-36]
str tmp1, [dst, #-36]
ldr tmp1, [src, #-32] /* 8 words to go. */
str tmp1, [dst, #-32]
ldr tmp1, [src, #-28]
str tmp1, [dst, #-28]
ldr tmp1, [src, #-24] /* 6 words to go. */
str tmp1, [dst, #-24]
ldr tmp1, [src, #-20]
str tmp1, [dst, #-20]
ldr tmp1, [src, #-16] /* 4 words to go. */
str tmp1, [dst, #-16]
ldr tmp1, [src, #-12]
str tmp1, [dst, #-12]
ldr tmp1, [src, #-8] /* 2 words to go. */
str tmp1, [dst, #-8]
ldr tmp1, [src, #-4]
str tmp1, [dst, #-4]
#endif
lsls count, count, #31
ldrhcs tmp1, [src], #2
ldrbne src, [src] /* Src is dead, use as a scratch. */
strhcs tmp1, [dst], #2
strbne src, [dst]
bx lr
.Lcpy_not_short:
/* At least 64 bytes to copy, but don't know the alignment yet. */
str tmp2, [sp, #-FRAME_SIZE]!
and tmp2, src, #7
and tmp1, dst, #7
cmp tmp1, tmp2
bne .Lcpy_notaligned
#ifdef USE_VFP
/* Magic dust alert! Force VFP on Cortex-A9. Experiments show
that the FP pipeline is much better at streaming loads and
stores. This is outside the critical loop. */
vmov.f32 s0, s0
#endif
/* SRC and DST have the same mutual 32-bit alignment, but we may
still need to pre-copy some bytes to get to natural alignment.
We bring DST into full 64-bit alignment. */
lsls tmp2, dst, #29
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src], #1
strhcs tmp1, [dst], #2
strbne tmp2, [dst], #1
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
blt .Ltail63aligned
cmp tmp2, #512
bge .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
#ifdef USE_VFP
1:
vldr d0, [src, #0]
subs tmp2, tmp2, #64
vldr d1, [src, #8]
vstr d0, [dst, #0]
vldr d0, [src, #16]
vstr d1, [dst, #8]
vldr d1, [src, #24]
vstr d0, [dst, #16]
vldr d0, [src, #32]
vstr d1, [dst, #24]
vldr d1, [src, #40]
vstr d0, [dst, #32]
vldr d0, [src, #48]
vstr d1, [dst, #40]
vldr d1, [src, #56]
vstr d0, [dst, #48]
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
bge 1b
tst tmp2, #0x3f
beq .Ldone
.Ltail63aligned: /* Count in tmp2. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vldr d0, [src, #-56] /* 14 words to go. */
vstr d0, [dst, #-56]
vldr d0, [src, #-48] /* 12 words to go. */
vstr d0, [dst, #-48]
vldr d0, [src, #-40] /* 10 words to go. */
vstr d0, [dst, #-40]
vldr d0, [src, #-32] /* 8 words to go. */
vstr d0, [dst, #-32]
vldr d0, [src, #-24] /* 6 words to go. */
vstr d0, [dst, #-24]
vldr d0, [src, #-16] /* 4 words to go. */
vstr d0, [dst, #-16]
vldr d0, [src, #-8] /* 2 words to go. */
vstr d0, [dst, #-8]
#else
sub src, src, #8
sub dst, dst, #8
1:
ldrd A_l, A_h, [src, #8]
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #16]
strd A_l, A_h, [dst, #16]
ldrd A_l, A_h, [src, #24]
strd A_l, A_h, [dst, #24]
ldrd A_l, A_h, [src, #32]
strd A_l, A_h, [dst, #32]
ldrd A_l, A_h, [src, #40]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #48]
strd A_l, A_h, [dst, #48]
ldrd A_l, A_h, [src, #56]
strd A_l, A_h, [dst, #56]
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
bge 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE
bx lr
1:
add src, src, #8
add dst, dst, #8
.Ltail63aligned: /* Count in tmp2. */
/* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but
we know that the src and dest are 32-bit aligned so we can use
LDRD/STRD to improve efficiency. */
/* TMP2 is now negative, but we don't care about that. The bottom
six bits still tell us how many bytes are left to copy. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
ldrd A_l, A_h, [src, #-56] /* 14 words to go. */
strd A_l, A_h, [dst, #-56]
ldrd A_l, A_h, [src, #-48] /* 12 words to go. */
strd A_l, A_h, [dst, #-48]
ldrd A_l, A_h, [src, #-40] /* 10 words to go. */
strd A_l, A_h, [dst, #-40]
ldrd A_l, A_h, [src, #-32] /* 8 words to go. */
strd A_l, A_h, [dst, #-32]
ldrd A_l, A_h, [src, #-24] /* 6 words to go. */
strd A_l, A_h, [dst, #-24]
ldrd A_l, A_h, [src, #-16] /* 4 words to go. */
strd A_l, A_h, [dst, #-16]
ldrd A_l, A_h, [src, #-8] /* 2 words to go. */
strd A_l, A_h, [dst, #-8]
#endif
tst tmp2, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src]
strhcs tmp1, [dst], #2
strbne tmp2, [dst]
.Ldone:
ldr tmp2, [sp], #FRAME_SIZE
bx lr
.Lcpy_body_long: /* Count in tmp2. */
/* Long copy. We know that there's at least (prefetch_lines * 64)
bytes to go. */
#ifdef USE_VFP
/* Don't use PLD. Instead, read some data in advance of the current
copy position into a register. This should act like a PLD
operation but we won't have to repeat the transfer. */
vldr d3, [src, #0]
vldr d4, [src, #64]
vldr d5, [src, #128]
vldr d6, [src, #192]
vldr d7, [src, #256]
vldr d0, [src, #8]
vldr d1, [src, #16]
vldr d2, [src, #24]
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
blt 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
cpy_line_vfp d5, 128
add dst, dst, #3 * 64
add src, src, #3 * 64
cpy_line_vfp d6, 0
cpy_line_vfp d7, 64
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
bge 1b
2:
cpy_tail_vfp d3, 0
cpy_tail_vfp d4, 64
cpy_tail_vfp d5, 128
add src, src, #3 * 64
add dst, dst, #3 * 64
cpy_tail_vfp d6, 0
vstr d7, [dst, #64]
vldr d7, [src, #64]
vstr d0, [dst, #64 + 8]
vldr d0, [src, #64 + 8]
vstr d1, [dst, #64 + 16]
vldr d1, [src, #64 + 16]
vstr d2, [dst, #64 + 24]
vldr d2, [src, #64 + 24]
vstr d7, [dst, #64 + 32]
add src, src, #96
vstr d0, [dst, #64 + 40]
vstr d1, [dst, #64 + 48]
vstr d2, [dst, #64 + 56]
add dst, dst, #128
add tmp2, tmp2, #prefetch_lines * 64
b .Lcpy_body_medium
#else
/* Long copy. Use an SMS style loop to maximize the I/O
bandwidth of the core. We don't have enough spare registers
to synthesise prefetching, so use PLD operations. */
/* Pre-bias src and dst. */
sub src, src, #8
sub dst, dst, #8
pld [src, #8]
pld [src, #72]
subs tmp2, tmp2, #64
pld [src, #136]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [sp, #24]
pld [src, #200]
ldrd D_l, D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #232]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldrd D_l, D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldrd D_l, D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #40
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
tst tmp2, #0x3f
bne .Ltail63aligned
ldr tmp2, [sp], #FRAME_SIZE
bx lr
#endif
.Lcpy_notaligned:
pld [src]
pld [src, #64]
/* There's at least 64 bytes to copy, but there is no mutual
alignment. */
/* Bring DST to 64-bit alignment. */
lsls tmp2, dst, #29
pld [src, #(2 * 64)]
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrbne tmp1, [src], #1
ldrhcs tmp2, [src], #2
strbne tmp1, [dst], #1
strhcs tmp2, [dst], #2
1:
pld [src, #(3 * 64)]
subs count, count, #64
ldrmi tmp2, [sp], #FRAME_SIZE
bmi .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
vld1.8 {d0-d3}, [src]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bmi 2f
1:
pld [src, #(4 * 64)]
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vld1.8 {d0-d3}, [src]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bpl 1b
2:
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
ands count, count, #0x3f
#else
/* Use an SMS style loop to maximize the I/O bandwidth. */
sub src, src, #4
sub dst, dst, #8
subs tmp2, count, #64 /* Use tmp2 for count. */
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [sp, #24]
ldr D_l, [src, #28]
ldr D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #(5 * 64) - (32 - 4)]
strd A_l, A_h, [dst, #40]
ldr A_l, [src, #36]
ldr A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldr B_l, [src, #44]
ldr B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldr C_l, [src, #52]
ldr C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldr D_l, [src, #60]
ldr D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldr D_l, [src, #28]
ldr D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #36
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
ands count, tmp2, #0x3f
#endif
ldr tmp2, [sp], #FRAME_SIZE
bne .Ltail63unaligned
bx lr
END(memcpy)
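As a reading aid for the routine above, a rough C sketch of the aligned path it implements: pre-copy bytes until dst is 8-byte aligned, stream 64-byte blocks (LDRD/STRD or VLDR/VSTR), then finish the 0..63-byte tail, which the assembly dispatches with a computed jump into an unrolled table. The function name and structure are illustrative only.
/* Illustrative sketch, not the committed code. The asm additionally requires
 * that src and dst share the same offset modulo 8 before taking this path. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
static void *memcpy_aligned_sketch(void *dst, const void *src, size_t n) {
    unsigned char *d = dst;
    const unsigned char *s = src;
    /* Pre-copy 1, 2 or 4 bytes until d is 8-byte aligned; the asm selects
     * these with conditional ldrb/ldrh/ldr based on shifted flag tests. */
    while (((uintptr_t)d & 7) && n) { *d++ = *s++; n--; }
    /* Main loop: 64 bytes per iteration, as eight 8-byte transfers. */
    while (n >= 64) {
        for (int i = 0; i < 64; i += 8) {
            uint64_t tmp;
            memcpy(&tmp, s + i, 8);   /* stands in for LDRD / VLDR */
            memcpy(d + i, &tmp, 8);   /* stands in for STRD / VSTR */
        }
        d += 64; s += 64; n -= 64;
    }
    /* Tail: the asm jumps into an unrolled table via "add pc, pc, tmp1";
     * a byte loop is enough for the sketch. */
    while (n--) *d++ = *s++;
    return dst;
}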


@@ -42,6 +42,7 @@
ENTRY(bzero)
mov r2, r1
mov r1, #0
// Fall through to memset...
END(bzero)
ENTRY(memset)


@@ -0,0 +1,787 @@
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */
.syntax unified
#if defined (__thumb__)
.thumb
.thumb_func
#endif
.global strcmp
.type strcmp, %function
strcmp:
#if (defined (__thumb__) && !defined (__thumb2__))
1:
ldrb r2, [r0]
ldrb r3, [r1]
adds r0, r0, #1
adds r1, r1, #1
cmp r2, #0
beq 2f
cmp r2, r3
beq 1b
2:
subs r0, r2, r3
bx lr
#elif (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
1:
ldrb r2, [r0], #1
ldrb r3, [r1], #1
cmp r2, #1
it cs
cmpcs r2, r3
beq 1b
subs r0, r2, r3
RETURN
#elif (defined (_ISA_THUMB_2) || defined (_ISA_ARM_6))
/* Use LDRD whenever possible. */
/* The main thing to look out for when comparing large blocks is that
the loads do not cross a page boundary when loading past the index
of the byte with the first difference or the first string-terminator.
For example, if the strings are identical and the string-terminator
is at index k, byte by byte comparison will not load beyond address
s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
k; double word - up to 7 bytes. If the load of these bytes crosses
a page boundary, it might cause a memory fault (if the page is not mapped)
that would not have happened in byte by byte comparison.
If an address is (double) word aligned, then a load of a (double) word
from that address will not cross a page boundary.
Therefore, the algorithm below considers word and double-word alignment
of strings separately. */
/* High-level description of the algorithm.
* The fast path: if both strings are double-word aligned,
use LDRD to load two words from each string in every loop iteration.
* If the strings have the same offset from a word boundary,
use LDRB to load and compare byte by byte until
the first string is aligned to a word boundary (at most 3 bytes).
This is optimized for quick return on short unaligned strings.
* If the strings have the same offset from a double-word boundary,
use LDRD to load two words from each string in every loop iteration, as in the fast path.
* If the strings do not have the same offset from a double-word boundary,
load a word from the second string before the loop to initialize the queue.
Use LDRD to load two words from each string in every loop iteration.
Inside the loop, load the second word from the second string only after comparing
the first word, using the queued value, to guarantee safety across page boundaries.
* If the strings do not have the same offset from a word boundary,
use LDR and a shift queue. Order of loads and comparisons matters,
similarly to the previous case.
* Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
* The only difference between ARM and Thumb modes is the use of CBZ instruction.
* The only difference between big and little endian is the use of REV in little endian
to compute the return value, instead of MOV.
* No preload. [TODO.]
*/
.macro m_cbz reg label
#ifdef __thumb2__
cbz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
beq \label
#endif /* not defined __thumb2__ */
.endm /* m_cbz */
.macro m_cbnz reg label
#ifdef __thumb2__
cbnz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
bne \label
#endif /* not defined __thumb2__ */
.endm /* m_cbnz */
.macro init
/* Macro to save temporary registers and prepare magic values. */
subs sp, sp, #16
strd r4, r5, [sp, #8]
strd r6, r7, [sp]
mvn r6, #0 /* all F */
mov r7, #0 /* all 0 */
.endm /* init */
.macro magic_compare_and_branch w1 w2 label
/* Macro to compare registers w1 and w2 and conditionally branch to label. */
cmp \w1, \w2 /* Are w1 and w2 the same? */
magic_find_zero_bytes \w1
it eq
cmpeq ip, #0 /* Is there a zero byte in w1? */
bne \label
.endm /* magic_compare_and_branch */
.macro magic_find_zero_bytes w1
/* Macro to find all-zero bytes in w1, result is in ip. */
#if (defined (__ARM_FEATURE_DSP))
uadd8 ip, \w1, r6
sel ip, r7, r6
#else /* not defined (__ARM_FEATURE_DSP) */
/* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
Coincidentally, these processors only have Thumb-2 mode, where we can use
the (large) magic constant available directly as an immediate in instructions.
Note that we cannot use the magic constant in ARM mode, where we need
to create the constant in a register. */
sub ip, \w1, #0x01010101
bic ip, ip, \w1
and ip, ip, #0x80808080
#endif /* not defined (__ARM_FEATURE_DSP) */
.endm /* magic_find_zero_bytes */
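A C rendering of what magic_find_zero_bytes computes may help; this is a sketch of the two variants above, not code from the file.
/* Sketch of the zero-byte test above. Either variant yields a non-zero
 * "syndrome" exactly when the 32-bit word w contains a 0x00 byte. */
#include <stdint.h>
static uint32_t has_zero_byte(uint32_t w) {
    /* Non-DSP variant: a zero byte borrows in (w - 0x01010101) and keeps
     * its 0x80 bit after masking with ~w. */
    return (w - 0x01010101u) & ~w & 0x80808080u;
    /* DSP variant: UADD8 with 0xFFFFFFFF sets the per-byte carry (GE) flag
     * for every non-zero byte, so SEL between 0 and 0xFFFFFFFF leaves 0xFF
     * only in the byte lanes that were zero. */
}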
.macro setup_return w1 w2
#ifdef __ARMEB__
mov r1, \w1
mov r2, \w2
#else /* not __ARMEB__ */
rev r1, \w1
rev r2, \w2
#endif /* not __ARMEB__ */
.endm /* setup_return */
/*
optpld r0, #0
optpld r1, #0
*/
/* Are both strings double-word aligned? */
orr ip, r0, r1
tst ip, #7
bne do_align
/* Fast path. */
init
doubleword_aligned:
/* Get here when the strings to compare are double-word aligned. */
/* Compare two words in every iteration. */
.p2align 2
2:
/*
optpld r0, #16
optpld r1, #16
*/
/* Load the next double-word from each string. */
ldrd r2, r3, [r0], #8
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r2, w2=r4, label=return_24
magic_compare_and_branch w1=r3, w2=r5, label=return_35
b 2b
do_align:
/* Is the first string word-aligned? */
ands ip, r0, #3
beq word_aligned_r0
/* Fast compare byte by byte until the first string is word-aligned. */
/* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
to read until the next word boundary is 4-ip. */
bic r0, r0, #3
ldr r2, [r0], #4
lsls ip, ip, #31
beq byte2
bcs byte3
byte1:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE1_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte2:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE2_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte3:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE3_OFFSET
subs ip, r3, ip
bne fast_return
m_cbnz reg=r3, label=word_aligned_r0
fast_return:
mov r0, ip
bx lr
word_aligned_r0:
init
/* The first string is word-aligned. */
/* Is the second string word-aligned? */
ands ip, r1, #3
bne strcmp_unaligned
word_aligned:
/* The strings are word-aligned. */
/* Is the first string double-word aligned? */
tst r0, #4
beq doubleword_aligned_r0
/* If r0 is not double-word aligned yet, align it by loading
and comparing the next word from each string. */
ldr r2, [r0], #4
ldr r4, [r1], #4
magic_compare_and_branch w1=r2 w2=r4 label=return_24
doubleword_aligned_r0:
/* Get here when r0 is double-word aligned. */
/* Is r1 doubleword_aligned? */
tst r1, #4
beq doubleword_aligned
/* Get here when the strings to compare are word-aligned,
r0 is double-word aligned, but r1 is not double-word aligned. */
/* Initialize the queue. */
ldr r5, [r1], #4
/* Compare two words in every iteration. */
.p2align 2
3:
/*
optpld r0, #16
optpld r1, #16
*/
/* Load the next double-word from each string and compare. */
ldrd r2, r3, [r0], #8
magic_compare_and_branch w1=r2 w2=r5 label=return_25
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r3 w2=r4 label=return_34
b 3b
.macro miscmp_word offsetlo offsethi
/* Macro to compare misaligned strings. */
/* r0, r1 are word-aligned, and at least one of the strings
is not double-word aligned. */
/* Compare one word in every loop iteration. */
/* OFFSETLO is the original bit-offset of r1 from a word-boundary,
OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
/* Initialize the shift queue. */
ldr r5, [r1], #4
/* Compare one word from each string in every loop iteration. */
.p2align 2
7:
ldr r3, [r0], #4
S2LOMEM r5, r5, #\offsetlo
magic_find_zero_bytes w1=r3
cmp r7, ip, S2HIMEM #\offsetlo
and r2, r3, r6, S2LOMEM #\offsetlo
it eq
cmpeq r2, r5
bne return_25
ldr r5, [r1], #4
cmp ip, #0
eor r3, r2, r3
S2HIMEM r2, r5, #\offsethi
it eq
cmpeq r3, r2
bne return_32
b 7b
.endm /* miscmp_word */
strcmp_unaligned:
/* r0 is word-aligned, r1 is at offset ip from a word. */
/* Align r1 to the (previous) word-boundary. */
bic r1, r1, #3
/* Unaligned comparison word by word using LDRs. */
cmp ip, #2
beq miscmp_word_16 /* If ip == 2. */
bge miscmp_word_24 /* If ip == 3. */
miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
return_32:
setup_return w1=r3, w2=r2
b do_return
return_34:
setup_return w1=r3, w2=r4
b do_return
return_25:
setup_return w1=r2, w2=r5
b do_return
return_35:
setup_return w1=r3, w2=r5
b do_return
return_24:
setup_return w1=r2, w2=r4
do_return:
#ifdef __ARMEB__
mov r0, ip
#else /* not __ARMEB__ */
rev r0, ip
#endif /* not __ARMEB__ */
/* Restore temporaries early, before computing the return value. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
/* There is a zero or a different byte between r1 and r2. */
/* r0 contains a mask of all-zero bytes in r1. */
/* Using r0 and not ip here because cbz requires a low register. */
m_cbz reg=r0, label=compute_return_value
clz r0, r0
/* r0 contains the number of bits on the left of the first all-zero byte in r1. */
rsb r0, r0, #24
/* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
lsr r1, r1, r0
lsr r2, r2, r0
compute_return_value:
movs r0, #1
cmp r1, r2
/* The return value is computed as follows.
If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
which means r0:=r0-r0-1 and r0 is #-1 at return.
If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
which means r0:=r0-r0 and r0 is #0 at return.
(C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
it ls
sbcls r0, r0, r0
bx lr
#else /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6)
defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
(defined (__thumb__) && !defined (__thumb2__))) */
/* Use LDR whenever possible. */
#ifdef __thumb2__
#define magic1(REG) 0x01010101
#define magic2(REG) 0x80808080
#else
#define magic1(REG) REG
#define magic2(REG) REG, lsl #7
#endif
optpld r0
optpld r1
eor r2, r0, r1
tst r2, #3
/* Strings not at same byte offset from a word boundary. */
bne strcmp_unaligned
ands r2, r0, #3
bic r0, r0, #3
bic r1, r1, #3
ldr ip, [r0], #4
it eq
ldreq r3, [r1], #4
beq 1f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor r2, r2, #3
lsl r2, r2, #3
mvn r3, MSB
S2LOMEM r2, r3, r2
ldr r3, [r1], #4
orr ip, ip, r2
orr r3, r3, r2
1:
#ifndef __thumb2__
/* Load the 'magic' constant 0x01010101. */
str r4, [sp, #-4]!
mov r4, #1
orr r4, r4, r4, lsl #8
orr r4, r4, r4, lsl #16
#endif
.p2align 2
4:
optpld r0, #8
optpld r1, #8
sub r2, ip, magic1(r4)
cmp ip, r3
itttt eq
/* check for any zero bytes in first word */
biceq r2, r2, ip
tsteq r2, magic2(r4)
ldreq ip, [r0], #4
ldreq r3, [r1], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
S2HIMEM r0, ip, #24
S2LOMEM ip, ip, #8
cmp r0, #1
it cs
cmpcs r0, r3, S2HIMEM #24
it eq
S2LOMEMEQ r3, r3, #8
beq 2b
/* On a big-endian machine, r0 contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in r0 are all zero. For r3 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARMEB__
sub r0, r0, r3, lsr #24
#else
and r3, r3, #255
#ifdef __thumb2__
/* No RSB instruction in Thumb2 */
lsr r0, r0, #24
sub r0, r0, r3
#else
rsb r0, r3, r0, lsr #24
#endif
#endif
#ifndef __thumb2__
ldr r4, [sp], #4
#endif
RETURN
strcmp_unaligned:
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARMEB__
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
w1 = *wp1++; \
w2 = *wp2++; \
do \
{ \
t1 = w1 & mask; \
if (__builtin_expect(t1 != w2 RSHIFT shift, 0)) \
{ \
w2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((w1 - b1) & ~w1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((w1 - b1) & ~w1) & (b1 << 7)) & mask) \
w2 RSHIFT= shift; \
else \
{ \
w2 = *wp2; \
t1 = w1 RSHIFT (32 - shift); \
w2 = (w2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
w2 = *wp2++; \
t1 ^= w1; \
if (__builtin_expect(t1 != w2 LSHIFT (32 - shift), 0)) \
{ \
t1 = w1 >> (32 - shift); \
w2 = (w2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
w1 = *wp1++; \
} while (1)
const unsigned* wp1;
const unsigned* wp2;
unsigned w1, w2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned t1;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
wp1 = (unsigned*) (((unsigned)s1) & ~3);
wp2 = (unsigned*) (((unsigned)s2) & ~3);
t1 = ((unsigned) s2) & 3;
if (t1 == 1)
{
body(8);
}
else if (t1 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARMEB__
c1 = (char) t1 >> 24;
c2 = (char) w2 >> 24;
#else /* not __ARMEB__ */
c1 = (char) t1;
c2 = (char) w2;
#endif /* not __ARMEB__ */
t1 RSHIFT= 8;
w2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
wp1 .req r0
wp2 .req r1
b1 .req r2
w1 .req r4
w2 .req r5
t1 .req ip
@ r3 is scratch
/* First of all, compare bytes until wp1(sp1) is word-aligned. */
1:
tst wp1, #3
beq 2f
ldrb r2, [wp1], #1
ldrb r3, [wp2], #1
cmp r2, #1
it cs
cmpcs r2, r3
beq 1b
sub r0, r2, r3
RETURN
2:
str r5, [sp, #-4]!
str r4, [sp, #-4]!
//stmfd sp!, {r4, r5}
mov b1, #1
orr b1, b1, b1, lsl #8
orr b1, b1, b1, lsl #16
and t1, wp2, #3
bic wp2, wp2, #3
ldr w1, [wp1], #4
ldr w2, [wp2], #4
cmp t1, #2
beq 2f
bhi 3f
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
1:
bic t1, w1, MSB
cmp t1, w2, S2LOMEM #8
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #24
bne 6f
ldr w1, [wp1], #4
b 1b
4:
S2LOMEM w2, w2, #8
b 8f
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst w1, #0xff000000
itt ne
tstne w1, #0x00ff0000
tstne w1, #0x0000ff00
beq 7f
#else
bics r3, r3, #0xff000000
bne 7f
#endif
ldrb w2, [wp2]
S2LOMEM t1, w1, #24
#ifdef __ARMEB__
lsl w2, w2, #24
#endif
b 8f
6:
S2LOMEM t1, w1, #24
and w2, w2, LSB
b 8f
/* Critical inner Loop: Block with 2 bytes initial overlap */
.p2align 2
2:
S2HIMEM t1, w1, #16
sub r3, w1, b1
S2LOMEM t1, t1, #16
bic r3, r3, w1
cmp t1, w2, S2LOMEM #16
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #16
bne 6f
ldr w1, [wp1], #4
b 2b
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst w1, #0xff000000
it ne
tstne w1, #0x00ff0000
beq 7f
#else
lsls r3, r3, #16
bne 7f
#endif
ldrh w2, [wp2]
S2LOMEM t1, w1, #16
#ifdef __ARMEB__
lsl w2, w2, #16
#endif
b 8f
6:
S2HIMEM w2, w2, #16
S2LOMEM t1, w1, #16
4:
S2LOMEM w2, w2, #16
b 8f
/* Critical inner Loop: Block with 1 byte initial overlap */
.p2align 2
3:
and t1, w1, LSB
cmp t1, w2, S2LOMEM #24
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #8
bne 6f
ldr w1, [wp1], #4
b 3b
4:
S2LOMEM w2, w2, #24
b 8f
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst w1, LSB
beq 7f
ldr w2, [wp2], #4
6:
S2LOMEM t1, w1, #8
bic w2, w2, MSB
b 8f
7:
mov r0, #0
//ldmfd sp!, {r4, r5}
ldr r4, [sp], #4
ldr r5, [sp], #4
RETURN
8:
and r2, t1, LSB
and r0, w2, LSB
cmp r0, #1
it cs
cmpcs r0, r2
itt eq
S2LOMEMEQ t1, t1, #8
S2LOMEMEQ w2, w2, #8
beq 8b
sub r0, r2, r0
//ldmfd sp!, {r4, r5}
ldr r4, [sp], #4
ldr r5, [sp], #4
RETURN
#endif /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6)
defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
(defined (__thumb__) && !defined (__thumb2__))) */


@@ -26,8 +26,8 @@
* SUCH DAMAGE.
*/
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(syscall)
mov ip, sp
@@ -39,7 +39,8 @@ ENTRY(syscall)
ldmfd ip, {r3, r4, r5, r6}
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(syscall)
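The four added instructions implement the usual Linux return convention; a hedged C equivalent is sketched below (MAX_ERRNO comes from <linux/err.h>, typically 4095; the function name is made up).
/* Sketch of the error path added above: the kernel returns -errno in the
 * range [-MAX_ERRNO, -1] on failure, so the stub negates it, stores it in
 * errno and returns -1 (roughly what __set_errno does); any other value is
 * returned unchanged (the bxls case). */
#include <errno.h>
#define MAX_ERRNO 4095   /* value from <linux/err.h> */
static long finish_syscall(long ret) {
    if ((unsigned long)ret >= (unsigned long)(-MAX_ERRNO)) {  /* same test as cmn/bxls */
        errno = (int)(-ret);
        return -1;
    }
    return ret;
}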


@@ -25,6 +25,11 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/err.h>
#include <asm/unistd.h>
#include <machine/asm.h>
/* unlike our auto-generated syscall stubs, this code saves lr
on the stack, as well as a few other registers. this makes
our stack unwinder happy, when we generate debug stack
@@ -33,19 +38,14 @@
of a corrupted malloc heap).
*/
#include <sys/linux-syscalls.h>
#include <machine/asm.h>
#ifndef __NR_tgkill
#define __NR_tgkill 268
#endif
ENTRY(tgkill)
.save {r4-r7, ip, lr}
stmfd sp!, {r4-r7, ip, lr}
ldr r7, =__NR_tgkill
swi #0
ldmfd sp!, {r4-r7, ip, lr}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(tgkill)


@@ -25,6 +25,11 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/err.h>
#include <asm/unistd.h>
#include <machine/asm.h>
/* unlike our auto-generated syscall stubs, this code saves lr
on the stack, as well as a few other registers. this makes
our stack unwinder happy, when we generate debug stack
@@ -33,19 +38,13 @@
of a corrupted malloc heap).
*/
#include <sys/linux-syscalls.h>
#include <machine/asm.h>
#ifndef __NR_tkill
#define __NR_tkill 238
#endif
ENTRY(tkill)
stmfd sp!, {r4-r7, ip, lr}
ldr r7, =__NR_tkill
swi #0
ldmfd sp!, {r4-r7, ip, lr}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(tkill)


@@ -0,0 +1,324 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Prototype: void *memcpy (void *dst, const void *src, size_t count). */
// This version is tuned for the Cortex-A15 processor.
#include <machine/cpu-features.h>
#include <machine/asm.h>
.text
.syntax unified
.fpu neon
#define CACHE_LINE_SIZE 64
ENTRY(memcpy)
// Assumes that n >= 0, and dst, src are valid pointers.
// For any sizes less than 832 use the neon code that doesn't
// care about the src alignment. This avoids any checks
// for src alignment, and offers the best improvement since
// smaller sized copies are dominated by the overhead of
// the pre and post main loop.
// For larger copies, if src and dst cannot both be aligned to
// word boundaries, use the neon code.
// For all other copies, align dst to a double word boundary
// and copy using LDRD/STRD instructions.
// Save registers (r0 holds the return value):
// optimized push {r0, lr}.
.save {r0, lr}
pld [r1, #(CACHE_LINE_SIZE*16)]
push {r0, lr}
cmp r2, #16
blo copy_less_than_16_unknown_align
cmp r2, #832
bge check_alignment
copy_unknown_alignment:
// Unknown alignment of src and dst.
// Assumes that the first few bytes have already been prefetched.
// Align destination to 128 bits. The mainloop store instructions
// require this alignment or they will throw an exception.
rsb r3, r0, #0
ands r3, r3, #0xF
beq 2f
// Copy up to 15 bytes (count in r3).
sub r2, r2, r3
movs ip, r3, lsl #31
itt mi
ldrbmi lr, [r1], #1
strbmi lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1], #1
strbcs ip, [r0], #1
strbcs lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// Copies 4 bytes, dst 32 bits aligned before, at least 64 bits after.
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// Copies 8 bytes, dst 64 bits aligned before, at least 128 bits after.
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2: // Make sure we have at least 64 bytes to copy.
subs r2, r2, #64
blo 2f
1: // The main loop copies 64 bytes at a time.
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE*4)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b
2: // Fix-up the remaining count and make sure we have >= 32 bytes left.
adds r2, r2, #32
blo 3f
// 32 bytes. These cache lines were already preloaded.
vld1.8 {d0 - d3}, [r1]!
sub r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
3: // Less than 32 left.
add r2, r2, #32
tst r2, #0x10
beq copy_less_than_16_unknown_align
// Copies 16 bytes, destination 128 bits aligned.
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
copy_less_than_16_unknown_align:
// Copy up to 15 bytes (count in r2).
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: // Copy 0 to 4 bytes.
lsls r2, r2, #31
itt ne
ldrbne lr, [r1], #1
strbne lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1]
strbcs ip, [r0], #1
strbcs lr, [r0]
pop {r0, pc}
check_alignment:
// If src and dst cannot both be aligned to a word boundary,
// use the unaligned copy version.
eor r3, r0, r1
ands r3, r3, #0x3
bne copy_unknown_alignment
// To try to improve performance, the stack layout is changed here,
// i.e., the stack does not look the way users expect it to
// (highest numbered register at highest address).
// TODO: Add debug frame directives.
// We don't need exception unwind directives, because the code below
// does not throw any exceptions and does not call any other functions.
// Generally, newlib functions like this lack debug information for
// assembler source.
.save {r4, r5}
strd r4, r5, [sp, #-8]!
.save {r6, r7}
strd r6, r7, [sp, #-8]!
.save {r8, r9}
strd r8, r9, [sp, #-8]!
// Optimized for already aligned dst code.
ands ip, r0, #3
bne dst_not_word_aligned
word_aligned:
// Align the destination buffer to 8 bytes, to make sure double
// loads and stores don't cross a cache line boundary,
// as they are then more expensive even if the data is in the cache
// (require two load/store issue cycles instead of one).
// If only one of the buffers is not 8 bytes aligned,
// then it's more important to align dst than src,
// because there is more penalty for stores
// than loads that cross a cacheline boundary.
// This check and realignment are only done if there is >= 832
// bytes to copy.
// Dst is word aligned, but check if it is already double word aligned.
ands r3, r0, #4
beq 1f
ldr r3, [r1], #4
str r3, [r0], #4
sub r2, #4
1: // Can only get here with more than 64 bytes to copy, so there is no need to check r2.
sub r2, #64
2: // Every loop iteration copies 64 bytes.
.irp offset, #0, #8, #16, #24, #32
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
ldrd r4, r5, [r1, #40]
ldrd r6, r7, [r1, #48]
ldrd r8, r9, [r1, #56]
// Keep the pld as far from the next load as possible.
// The amount to prefetch was determined experimentally using
// large sizes, and verifying the prefetch size does not affect
// the smaller copies too much.
// WARNING: If the ldrd and strd instructions get too far away
// from each other, performance suffers. Three loads
// in a row is the best tradeoff.
pld [r1, #(CACHE_LINE_SIZE*16)]
strd r4, r5, [r0, #40]
strd r6, r7, [r0, #48]
strd r8, r9, [r0, #56]
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 2b
// Fix-up the remaining count and make sure we have >= 32 bytes left.
adds r2, r2, #32
blo 4f
// Copy 32 bytes. These cache lines were already preloaded.
.irp offset, #0, #8, #16, #24
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r1, r1, #32
add r0, r0, #32
sub r2, r2, #32
4: // Less than 32 left.
add r2, r2, #32
tst r2, #0x10
beq 5f
// Copy 16 bytes.
.irp offset, #0, #8
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r1, r1, #16
add r0, r0, #16
5: // Copy up to 15 bytes (count in r2).
movs ip, r2, lsl #29
bcc 1f
// Copy 8 bytes.
ldrd r4, r5, [r1], #8
strd r4, r5, [r0], #8
1: bge 2f
// Copy 4 bytes.
ldr r4, [r1], #4
str r4, [r0], #4
2: // Copy 0 to 4 bytes.
lsls r2, r2, #31
itt ne
ldrbne lr, [r1], #1
strbne lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1]
strbcs ip, [r0], #1
strbcs lr, [r0]
// Restore registers: optimized pop {r0, pc}
ldrd r8, r9, [sp], #8
ldrd r6, r7, [sp], #8
ldrd r4, r5, [sp], #8
pop {r0, pc}
dst_not_word_aligned:
// Align dst to word.
rsb ip, ip, #4
cmp ip, #2
itt gt
ldrbgt lr, [r1], #1
strbgt lr, [r0], #1
itt ge
ldrbge lr, [r1], #1
strbge lr, [r0], #1
ldrb lr, [r1], #1
strb lr, [r0], #1
sub r2, r2, ip
// Src is guaranteed to be at least word aligned by this point.
b word_aligned
END(memcpy)
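The size/alignment policy spelled out in the comments at the top of this memcpy can be summarized in C; the enum and function names are made up for this sketch, but the 16- and 832-byte thresholds and the word-alignment test are the ones used above.
#include <stddef.h>
#include <stdint.h>
enum copy_path { SMALL_COPY, NEON_UNALIGNED, LDRD_STRD_ALIGNED };
/* Which path the Cortex-A15 memcpy above takes for a given call. */
static enum copy_path choose_path(const void *dst, const void *src, size_t n) {
    if (n < 16) return SMALL_COPY;                 /* blo copy_less_than_16_unknown_align */
    if (n < 832) return NEON_UNALIGNED;            /* below the check_alignment threshold */
    if (((uintptr_t)dst ^ (uintptr_t)src) & 3)     /* eor/ands #3: cannot co-align to words */
        return NEON_UNALIGNED;
    return LDRD_STRD_ALIGNED;                      /* align dst to 8, copy with LDRD/STRD */
}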


@@ -0,0 +1,164 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
.fpu neon
.syntax unified
ENTRY(bzero)
mov r2, r1
mov r1, #0
// Fall through to memset...
END(bzero)
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
// The new algorithm is slower for copies < 16 so use the old
// neon code in that case.
cmp r2, #16
blo set_less_than_16_unknown_align
// Use strd which requires an even and odd register so move the
// values so that:
// r0 and r1 contain the memset value
// r2 is the number of bytes to set
// r3 is the destination pointer
mov r3, r0
// Copy the byte value in every byte of r1.
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
check_alignment:
// Align destination to a double word to avoid the strd crossing
// a cache line boundary.
ands ip, r3, #7
bne do_double_word_align
double_word_aligned:
mov r0, r1
subs r2, #64
blo set_less_than_64
1: // Main loop sets 64 bytes at a time.
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
strd r0, r1, [r3, \offset]
.endr
add r3, #64
subs r2, #64
bge 1b
set_less_than_64:
// Restore r2 to the count of bytes left to set.
add r2, #64
lsls ip, r2, #27
bcc set_less_than_32
// Set 32 bytes.
.irp offset, #0, #8, #16, #24
strd r0, r1, [r3, \offset]
.endr
add r3, #32
set_less_than_32:
bpl set_less_than_16
// Set 16 bytes.
.irp offset, #0, #8
strd r0, r1, [r3, \offset]
.endr
add r3, #16
set_less_than_16:
// Less than 16 bytes to set.
lsls ip, r2, #29
bcc set_less_than_8
// Set 8 bytes.
strd r0, r1, [r3], #8
set_less_than_8:
bpl set_less_than_4
// Set 4 bytes
str r1, [r3], #4
set_less_than_4:
lsls ip, r2, #31
it ne
strbne r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3]
ldmfd sp!, {r0}
bx lr
do_double_word_align:
rsb ip, ip, #8
sub r2, r2, ip
movs r0, ip, lsl #31
it mi
strbmi r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3], #1
// Dst is at least word aligned by this point.
cmp ip, #4
blo double_word_aligned
str r1, [r3], #4
b double_word_aligned
set_less_than_16_unknown_align:
// Set up to 15 bytes.
vdup.8 d0, r1
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
it mi
strbmi r1, [r0], #1
itt cs
strbcs r1, [r0], #1
strbcs r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)
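A rough C sketch of the approach above: replicate the fill byte across a word, align the destination to 8 bytes, then store in large runs (the assembly writes 64 bytes per iteration with STRD). The function name and loop granularity are illustrative only.
#include <stddef.h>
#include <stdint.h>
#include <string.h>
static void *memset_sketch(void *dst, int c, size_t n) {
    unsigned char *d = dst;
    /* Replicate the low byte into all four byte lanes; equivalent to the
     * asm's lsl #24 followed by the two orr/lsr steps. */
    uint32_t v = (uint8_t)c;
    v |= v << 8;
    v |= v << 16;
    while (((uintptr_t)d & 7) && n) { *d++ = (uint8_t)v; n--; }   /* do_double_word_align */
    while (n >= 8) {               /* the asm does 64 bytes per iteration with strd */
        memcpy(d, &v, 4);
        memcpy(d + 4, &v, 4);
        d += 8; n -= 8;
    }
    while (n--) *d++ = (uint8_t)v;
    return dst;
}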


@@ -0,0 +1,377 @@
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */
.syntax unified
#if defined (__thumb__)
.thumb
.thumb_func
#endif
ENTRY(strcmp)
/* Use LDRD whenever possible. */
/* The main thing to look out for when comparing large blocks is that
the loads do not cross a page boundary when loading past the index
of the byte with the first difference or the first string-terminator.
For example, if the strings are identical and the string-terminator
is at index k, byte by byte comparison will not load beyond address
s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
k; double word - up to 7 bytes. If the load of these bytes crosses
a page boundary, it might cause a memory fault (if the page is not mapped)
that would not have happened in byte by byte comparison.
If an address is (double) word aligned, then a load of a (double) word
from that address will not cross a page boundary.
Therefore, the algorithm below considers word and double-word alignment
of strings separately. */
/* High-level description of the algorithm.
* The fast path: if both strings are double-word aligned,
use LDRD to load two words from each string in every loop iteration.
* If the strings have the same offset from a word boundary,
use LDRB to load and compare byte by byte until
the first string is aligned to a word boundary (at most 3 bytes).
This is optimized for quick return on short unaligned strings.
* If the strings have the same offset from a double-word boundary,
use LDRD to load two words from each string in every loop iteration, as in the fast path.
* If the strings do not have the same offset from a double-word boundary,
load a word from the second string before the loop to initialize the queue.
Use LDRD to load two words from each string in every loop iteration.
Inside the loop, load the second word from the second string only after comparing
the first word, using the queued value, to guarantee safety across page boundaries.
* If the strings do not have the same offset from a word boundary,
use LDR and a shift queue. Order of loads and comparisons matters,
similarly to the previous case.
* Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
* The only difference between ARM and Thumb modes is the use of CBZ instruction.
* The only difference between big and little endian is the use of REV in little endian
to compute the return value, instead of MOV.
*/
.macro m_cbz reg label
#ifdef __thumb2__
cbz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
beq \label
#endif /* not defined __thumb2__ */
.endm /* m_cbz */
.macro m_cbnz reg label
#ifdef __thumb2__
cbnz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
bne \label
#endif /* not defined __thumb2__ */
.endm /* m_cbnz */
.macro init
/* Macro to save temporary registers and prepare magic values. */
subs sp, sp, #16
strd r4, r5, [sp, #8]
strd r6, r7, [sp]
mvn r6, #0 /* all F */
mov r7, #0 /* all 0 */
.endm /* init */
.macro magic_compare_and_branch w1 w2 label
/* Macro to compare registers w1 and w2 and conditionally branch to label. */
cmp \w1, \w2 /* Are w1 and w2 the same? */
magic_find_zero_bytes \w1
it eq
cmpeq ip, #0 /* Is there a zero byte in w1? */
bne \label
.endm /* magic_compare_and_branch */
.macro magic_find_zero_bytes w1
/* Macro to find all-zero bytes in w1, result is in ip. */
#if (defined (__ARM_FEATURE_DSP))
uadd8 ip, \w1, r6
sel ip, r7, r6
#else /* not defined (__ARM_FEATURE_DSP) */
/* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
Coincidentally, these processors only have Thumb-2 mode, where we can use
the (large) magic constant available directly as an immediate in instructions.
Note that we cannot use the magic constant in ARM mode, where we need
to create the constant in a register. */
sub ip, \w1, #0x01010101
bic ip, ip, \w1
and ip, ip, #0x80808080
#endif /* not defined (__ARM_FEATURE_DSP) */
.endm /* magic_find_zero_bytes */
.macro setup_return w1 w2
#ifdef __ARMEB__
mov r1, \w1
mov r2, \w2
#else /* not __ARMEB__ */
rev r1, \w1
rev r2, \w2
#endif /* not __ARMEB__ */
.endm /* setup_return */
pld [r0, #0]
pld [r1, #0]
/* Are both strings double-word aligned? */
orr ip, r0, r1
tst ip, #7
bne do_align
/* Fast path. */
init
doubleword_aligned:
/* Get here when the strings to compare are double-word aligned. */
/* Compare two words in every iteration. */
.p2align 2
2:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string. */
ldrd r2, r3, [r0], #8
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r2, w2=r4, label=return_24
magic_compare_and_branch w1=r3, w2=r5, label=return_35
b 2b
do_align:
/* Is the first string word-aligned? */
ands ip, r0, #3
beq word_aligned_r0
/* Fast compare byte by byte until the first string is word-aligned. */
/* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
to read until the next word boundary is 4-ip. */
bic r0, r0, #3
ldr r2, [r0], #4
lsls ip, ip, #31
beq byte2
bcs byte3
byte1:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE1_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte2:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE2_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte3:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE3_OFFSET
subs ip, r3, ip
bne fast_return
m_cbnz reg=r3, label=word_aligned_r0
fast_return:
mov r0, ip
bx lr
word_aligned_r0:
init
/* The first string is word-aligned. */
/* Is the second string word-aligned? */
ands ip, r1, #3
bne strcmp_unaligned
word_aligned:
/* The strings are word-aligned. */
/* Is the first string double-word aligned? */
tst r0, #4
beq doubleword_aligned_r0
/* If r0 is not double-word aligned yet, align it by loading
and comparing the next word from each string. */
ldr r2, [r0], #4
ldr r4, [r1], #4
magic_compare_and_branch w1=r2 w2=r4 label=return_24
doubleword_aligned_r0:
/* Get here when r0 is double-word aligned. */
/* Is r1 doubleword_aligned? */
tst r1, #4
beq doubleword_aligned
/* Get here when the strings to compare are word-aligned,
r0 is double-word aligned, but r1 is not double-word aligned. */
/* Initialize the queue. */
ldr r5, [r1], #4
/* Compare two words in every iteration. */
.p2align 2
3:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string and compare. */
ldrd r2, r3, [r0], #8
magic_compare_and_branch w1=r2 w2=r5 label=return_25
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r3 w2=r4 label=return_34
b 3b
.macro miscmp_word offsetlo offsethi
/* Macro to compare misaligned strings. */
/* r0, r1 are word-aligned, and at least one of the strings
is not double-word aligned. */
/* Compare one word in every loop iteration. */
/* OFFSETLO is the original bit-offset of r1 from a word-boundary,
OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
/* Initialize the shift queue. */
ldr r5, [r1], #4
/* Compare one word from each string in every loop iteration. */
.p2align 2
7:
ldr r3, [r0], #4
S2LOMEM r5, r5, #\offsetlo
magic_find_zero_bytes w1=r3
cmp r7, ip, S2HIMEM #\offsetlo
and r2, r3, r6, S2LOMEM #\offsetlo
it eq
cmpeq r2, r5
bne return_25
ldr r5, [r1], #4
cmp ip, #0
eor r3, r2, r3
S2HIMEM r2, r5, #\offsethi
it eq
cmpeq r3, r2
bne return_32
b 7b
.endm /* miscmp_word */
strcmp_unaligned:
/* r0 is word-aligned, r1 is at offset ip from a word. */
/* Align r1 to the (previous) word-boundary. */
bic r1, r1, #3
/* Unaligned comparison word by word using LDRs. */
cmp ip, #2
beq miscmp_word_16 /* If ip == 2. */
bge miscmp_word_24 /* If ip == 3. */
miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
return_32:
setup_return w1=r3, w2=r2
b do_return
return_34:
setup_return w1=r3, w2=r4
b do_return
return_25:
setup_return w1=r2, w2=r5
b do_return
return_35:
setup_return w1=r3, w2=r5
b do_return
return_24:
setup_return w1=r2, w2=r4
do_return:
#ifdef __ARMEB__
mov r0, ip
#else /* not __ARMEB__ */
rev r0, ip
#endif /* not __ARMEB__ */
/* Restore temporaries early, before computing the return value. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
/* There is a zero or a different byte between r1 and r2. */
/* r0 contains a mask of all-zero bytes in r1. */
/* Using r0 and not ip here because cbz requires a low register. */
m_cbz reg=r0, label=compute_return_value
clz r0, r0
/* r0 contains the number of bits on the left of the first all-zero byte in r1. */
rsb r0, r0, #24
/* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
lsr r1, r1, r0
lsr r2, r2, r0
compute_return_value:
movs r0, #1
cmp r1, r2
/* The return value is computed as follows.
If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
which means r0:=r0-r0-1 and r0 is #-1 at return.
If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
which means r0:=r0-r0 and r0 is #0 at return.
(C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
it ls
sbcls r0, r0, r0
bx lr
END(strcmp)
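For reference, the movs/cmp/it ls/sbcls sequence described in the do_return comment computes, without a branch, the sign of an unsigned comparison; a minimal C sketch:
/* Sketch only: +1 if a > b, 0 if equal, -1 if a < b -- the value the SBC
 * trick in do_return leaves in r0. */
static int unsigned_sign(unsigned int a, unsigned int b) {
    return (a > b) - (a < b);
}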


@@ -0,0 +1,151 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
.syntax unified
.thumb
.thumb_func
ENTRY(strlen)
pld [r1, #128]
mov r1, r0
rsb r3, r0, #0
ands r3, r3, #7
beq mainloop
// Align to a double word (64 bits).
ands ip, r3, #1
beq align_to_32
ldrb r2, [r1], #1
cmp r2, #0
beq update_count_and_return
align_to_32:
ands ip, r3, #2
beq align_to_64
ldrb r2, [r1], #1
cmp r2, #0
beq update_count_and_return
ldrb r2, [r1], #1
cmp r2, #0
beq update_count_and_return
align_to_64:
ands ip, r3, #4
beq mainloop
ldr r3, [r1], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne zero_in_second_register
mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne zero_in_second_register
b mainloop
zero_in_first_register:
sub r1, r1, #4
zero_in_second_register:
sub r0, r1, r0
// Check for zero in byte 0.
ands r1, ip, #0x80
beq check_byte1
sub r0, r0, #4
bx lr
check_byte1:
// Check for zero in byte 1.
ands r1, ip, #0x8000
beq check_byte2
sub r0, r0, #3
bx lr
check_byte2:
// Check for zero in byte 2.
ands r1, ip, #0x800000
beq return
sub r0, r0, #2
bx lr
update_count_and_return:
sub r0, r1, r0
return:
sub r0, r0, #1
bx lr
END(strlen)
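A hedged C sketch of the shape of this strlen: the assembly aligns to 8 bytes and scans two words per iteration with LDRD, while the sketch aligns to 4 and scans one word for brevity; little-endian byte order is assumed, matching the check_byte0/1/2 tests above.
#include <stddef.h>
#include <stdint.h>
#include <string.h>
static size_t strlen_sketch(const char *s) {
    const char *p = s;
    while ((uintptr_t)p & 3) {                    /* byte-wise until aligned */
        if (*p == '\0') return (size_t)(p - s);
        p++;
    }
    for (;;) {
        uint32_t w, syn;
        memcpy(&w, p, 4);                         /* stands in for ldr/ldrd */
        syn = (w - 0x01010101u) & ~w & 0x80808080u;
        if (syn) {                                /* a zero byte is in this word */
            if (syn & 0x80u)     return (size_t)(p - s);      /* check for zero in byte 0 */
            if (syn & 0x8000u)   return (size_t)(p - s) + 1;  /* check_byte1 */
            if (syn & 0x800000u) return (size_t)(p - s) + 2;  /* check_byte2 */
            return (size_t)(p - s) + 3;
        }
        p += 4;
    }
}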


@@ -0,0 +1,6 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a15/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a15/bionic/memset.S)
$(call libc-add-cpu-variant-src,STRCMP,arch-arm/cortex-a15/bionic/strcmp.S)
$(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
include bionic/libc/arch-arm/generic/generic.mk


@@ -0,0 +1 @@
include bionic/libc/arch-arm/cortex-a15/cortex-a15.mk


@@ -0,0 +1,211 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that has a 32 byte
* cache line.
*/
.text
.fpu neon
#define CACHE_LINE_SIZE 32
ENTRY(memcpy)
.save {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE * 0)]
stmfd sp!, {r0, lr}
pld [r1, #(CACHE_LINE_SIZE * 2)]
// Check that the copy is at least 16 bytes; the alignment code below needs that.
cmp r2, #16
blo 5f
/* check if buffers are aligned. If so, run arm-only version */
eor r3, r0, r1
ands r3, r3, #0x3
beq 11f
/* Check the upper size limit for Neon unaligned memory access in memcpy */
cmp r2, #224
blo 3f
/* align destination to 16 bytes for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 3f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld1.32 {d0[0]}, [r1]!
vst1.32 {d0[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
/* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE * 0)]
pld [r1, #(CACHE_LINE_SIZE * 2)]
3:
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* preload all the cache lines we need */
pld [r1, #(CACHE_LINE_SIZE * 4)]
pld [r1, #(CACHE_LINE_SIZE * 6)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE * 6)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0]!
vst1.8 {d4 - d7}, [r0]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld1.32 {d0[0]}, [r1]!
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
11:
/* Simple arm-only copy loop to handle aligned copy operations */
stmfd sp!, {r4, r5, r6, r7, r8}
pld [r1, #(CACHE_LINE_SIZE * 4)]
/* Check alignment */
rsb r3, r1, #0
ands r3, #3
beq 2f
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r5, [r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r5, [r0], #1
2:
subs r2, r2, #64
blt 4f
3: /* Main copy loop, copying 64 bytes at a time */
pld [r1, #(CACHE_LINE_SIZE * 8)]
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
subs r2, r2, #64
bge 3b
4: /* Check if there are > 32 bytes left */
adds r2, r2, #64
subs r2, r2, #32
blt 5f
/* Copy 32 bytes */
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
subs r2, #32
5: /* Handle any remaining bytes */
adds r2, #32
beq 6f
movs r12, r2, lsl #28
ldmcsia r1!, {r3, r4, r5, r6} /* 16 bytes */
ldmmiia r1!, {r7, r8} /* 8 bytes */
stmcsia r0!, {r3, r4, r5, r6}
stmmiia r0!, {r7, r8}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
6:
ldmfd sp!, {r4, r5, r6, r7, r8}
ldmfd sp!, {r0, pc}
END(memcpy)
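The head and tail copies above use the "movs ip, rN, lsl #31" idiom: shifting the residual count left by 31 puts bit 0 into the N flag and bit 1 into the C flag, so one conditional byte copy (MI) and one conditional two-byte copy (CS) handle 0-3 bytes without a loop. A minimal C sketch of the same idea (names are illustrative):

#include <stdint.h>
#include <stddef.h>

/* Copy the 0-3 leftover bytes selected by the low two bits of the count,
 * mirroring the ldrmib/ldrcsb pairs above. */
static void copy_tail_0_to_3(uint8_t **dst, const uint8_t **src, size_t count) {
    if (count & 1) {                 /* MI path: one byte */
        *(*dst)++ = *(*src)++;
    }
    if (count & 2) {                 /* CS path: two bytes */
        *(*dst)++ = *(*src)++;
        *(*dst)++ = *(*src)++;
    }
}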


@@ -0,0 +1,152 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions and that supports neon instructions.
*/
.fpu neon
ENTRY(bzero)
mov r2, r1
mov r1, #0
END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
# The neon memset only wins for sizes less than 132 bytes.
cmp r2, #132
bhi 11f
.save {r0}
stmfd sp!, {r0}
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
subs r2, r2, #32
blo 2f
vmov q1, q0
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
11:
/* compute the offset to align the destination
* offset = (4-(dst&3))&3 = -dst & 3
*/
.save {r0, r4-r7, lr}
stmfd sp!, {r0, r4-r7, lr}
rsb r3, r0, #0
ands r3, r3, #3
cmp r3, r2
movhi r3, r2
/* splat r1 */
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
mov lr, r1
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
rsb r3, r0, #0
ands r3, r3, #0x1C
beq 3f
cmp r3, r2
andhi r3, r2, #0x1C
sub r2, r2, r3
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
3:
subs r2, r2, #32
mov r3, r1
bmi 2f
1: subs r2, r2, #32
stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
bhs 1b
2: add r2, r2, #32
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strmih r1, [r0], #2
movs r2, r2, lsl #2
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset)
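Two idioms recur in the large-count path above: the head-alignment count "-dst & 3" and splatting the fill byte across a 32-bit word with one shift and two orr steps. A minimal C sketch of both (names are illustrative):

#include <stdint.h>

/* Number of bytes needed to reach the next word boundary:
 * (4 - (addr & 3)) & 3, which is the same as -addr & 3. */
static unsigned bytes_to_word_boundary(uintptr_t addr) {
    return (unsigned)(-addr & 3u);
}

/* Replicate the fill byte into every byte of a word, as done with
 * "mov r1, r1, lsl #24" followed by the two orr/lsr steps. */
static uint32_t splat_byte(uint8_t c) {
    uint32_t w = (uint32_t)c << 24;
    w |= w >> 8;
    w |= w >> 16;
    return w;                        /* e.g. 0xabababab for 0xab */
}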


@@ -0,0 +1,544 @@
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */
.syntax unified
#if defined (__thumb__)
.thumb
.thumb_func
#endif
ENTRY(strcmp)
/* Use LDRD whenever possible. */
/* The main thing to look out for when comparing large blocks is that
the loads do not cross a page boundary when loading past the index
of the byte with the first difference or the first string-terminator.
For example, if the strings are identical and the string-terminator
is at index k, byte by byte comparison will not load beyond address
s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
k; double word - up to 7 bytes. If the load of these bytes crosses
a page boundary, it might cause a memory fault (if the page is not mapped)
that would not have happened in byte by byte comparison.
If an address is (double) word aligned, then a load of a (double) word
from that address will not cross a page boundary.
Therefore, the algorithm below considers word and double-word alignment
of strings separately. */
/* High-level description of the algorithm.
* The fast path: if both strings are double-word aligned,
use LDRD to load two words from each string in every loop iteration.
* If the strings have the same offset from a word boundary,
use LDRB to load and compare byte by byte until
the first string is aligned to a word boundary (at most 3 bytes).
This is optimized for quick return on short unaligned strings.
* If the strings have the same offset from a double-word boundary,
use LDRD to load two words from each string in every loop iteration, as in the fast path.
* If the strings do not have the same offset from a double-word boundary,
load a word from the second string before the loop to initialize the queue.
Use LDRD to load two words from every string in every loop iteration.
Inside the loop, load the second word from the second string only after comparing
the first word, using the queued value, to guarantee safety across page boundaries.
* If the strings do not have the same offset from a word boundary,
use LDR and a shift queue. Order of loads and comparisons matters,
similarly to the previous case.
* Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
* The only difference between ARM and Thumb modes is the use of CBZ instruction.
* The only difference between big and little endian is the use of REV in little endian
to compute the return value, instead of MOV.
*/
.macro m_cbz reg label
#ifdef __thumb2__
cbz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
beq \label
#endif /* not defined __thumb2__ */
.endm /* m_cbz */
.macro m_cbnz reg label
#ifdef __thumb2__
cbnz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
bne \label
#endif /* not defined __thumb2__ */
.endm /* m_cbnz */
.macro init
/* Macro to save temporary registers and prepare magic values. */
subs sp, sp, #16
strd r4, r5, [sp, #8]
strd r6, r7, [sp]
mvn r6, #0 /* all F */
mov r7, #0 /* all 0 */
.endm /* init */
.macro magic_compare_and_branch w1 w2 label
/* Macro to compare registers w1 and w2 and conditionally branch to label. */
cmp \w1, \w2 /* Are w1 and w2 the same? */
magic_find_zero_bytes \w1
it eq
cmpeq ip, #0 /* Is there a zero byte in w1? */
bne \label
.endm /* magic_compare_and_branch */
.macro magic_find_zero_bytes w1
/* Macro to find all-zero bytes in w1, result is in ip. */
#if (defined (__ARM_FEATURE_DSP))
uadd8 ip, \w1, r6
sel ip, r7, r6
#else /* not defined (__ARM_FEATURE_DSP) */
/* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
Coincidentally, these processors only have Thumb-2 mode, where we can use
the (large) magic constant available directly as an immediate in instructions.
Note that we cannot use the magic constant in ARM mode, where we need
to create the constant in a register. */
sub ip, \w1, #0x01010101
bic ip, ip, \w1
and ip, ip, #0x80808080
#endif /* not defined (__ARM_FEATURE_DSP) */
.endm /* magic_find_zero_bytes */
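For reference, the word-at-a-time comparison described in the header comment, together with the zero-byte test that magic_find_zero_bytes implements, can be sketched in C roughly as follows (aligned pointers assumed, names illustrative; the REV/CLZ result computation is replaced by a plain byte loop):

#include <stdint.h>

/* Non-zero exactly when the word contains a zero byte; this mirrors the
 * sub/bic/and fallback of magic_find_zero_bytes. */
static uint32_t zero_byte_mask(uint32_t w) {
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

/* Compare word-aligned strings one word at a time; drop to a byte loop
 * as soon as the words differ or one of them contains the terminator. */
static int strcmp_aligned_sketch(const uint32_t *s1, const uint32_t *s2) {
    for (;;) {
        uint32_t w1 = *s1++, w2 = *s2++;
        if (w1 == w2 && zero_byte_mask(w1) == 0)
            continue;                           /* fast path */
        const uint8_t *p1 = (const uint8_t *)(s1 - 1);
        const uint8_t *p2 = (const uint8_t *)(s2 - 1);
        for (int i = 0; i < 4; i++) {
            if (p1[i] != p2[i] || p1[i] == 0)
                return (int)p1[i] - (int)p2[i];
        }
    }
}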
.macro setup_return w1 w2
#ifdef __ARMEB__
mov r1, \w1
mov r2, \w2
#else /* not __ARMEB__ */
rev r1, \w1
rev r2, \w2
#endif /* not __ARMEB__ */
.endm /* setup_return */
pld [r0, #0]
pld [r1, #0]
/* Are both strings double-word aligned? */
orr ip, r0, r1
tst ip, #7
bne do_align
/* Fast path. */
init
doubleword_aligned:
/* Get here when the strings to compare are double-word aligned. */
/* Compare two words in every iteration. */
.p2align 2
2:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string. */
ldrd r2, r3, [r0], #8
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r2, w2=r4, label=return_24
magic_compare_and_branch w1=r3, w2=r5, label=return_35
b 2b
do_align:
/* Is the first string word-aligned? */
ands ip, r0, #3
beq word_aligned_r0
/* Fast compare byte by byte until the first string is word-aligned. */
/* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
to read until the next word boundary is 4-ip. */
bic r0, r0, #3
ldr r2, [r0], #4
lsls ip, ip, #31
beq byte2
bcs byte3
byte1:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE1_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte2:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE2_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte3:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE3_OFFSET
subs ip, r3, ip
bne fast_return
m_cbnz reg=r3, label=word_aligned_r0
fast_return:
mov r0, ip
bx lr
word_aligned_r0:
init
/* The first string is word-aligned. */
/* Is the second string word-aligned? */
ands ip, r1, #3
bne strcmp_unaligned
word_aligned:
/* The strings are word-aligned. */
/* Is the first string double-word aligned? */
tst r0, #4
beq doubleword_aligned_r0
/* If r0 is not double-word aligned yet, align it by loading
and comparing the next word from each string. */
ldr r2, [r0], #4
ldr r4, [r1], #4
magic_compare_and_branch w1=r2 w2=r4 label=return_24
doubleword_aligned_r0:
/* Get here when r0 is double-word aligned. */
/* Is r1 doubleword_aligned? */
tst r1, #4
beq doubleword_aligned
/* Get here when the strings to compare are word-aligned,
r0 is double-word aligned, but r1 is not double-word aligned. */
/* Initialize the queue. */
ldr r5, [r1], #4
/* Compare two words in every iteration. */
.p2align 2
3:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string and compare. */
ldrd r2, r3, [r0], #8
magic_compare_and_branch w1=r2 w2=r5 label=return_25
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r3 w2=r4 label=return_34
b 3b
.macro miscmp_word offsetlo offsethi
/* Macro to compare misaligned strings. */
/* r0, r1 are word-aligned, and at least one of the strings
is not double-word aligned. */
/* Compare one word in every loop iteration. */
/* OFFSETLO is the original bit-offset of r1 from a word-boundary,
OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
/* Initialize the shift queue. */
ldr r5, [r1], #4
/* Compare one word from each string in every loop iteration. */
.p2align 2
7:
ldr r3, [r0], #4
S2LOMEM r5, r5, #\offsetlo
magic_find_zero_bytes w1=r3
cmp r7, ip, S2HIMEM #\offsetlo
and r2, r3, r6, S2LOMEM #\offsetlo
it eq
cmpeq r2, r5
bne return_25
ldr r5, [r1], #4
cmp ip, #0
eor r3, r2, r3
S2HIMEM r2, r5, #\offsethi
it eq
cmpeq r3, r2
bne return_32
b 7b
.endm /* miscmp_word */
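The shift queue in miscmp_word above splits each comparison of the second string into two halves, which is equivalent to rebuilding its next logical word from two aligned loads. A minimal little-endian C sketch of that reconstruction (OFFSETLO and OFFSETHI as described in the macro comment, names illustrative):

#include <stdint.h>

/* Rebuild the word that starts offsetlo bits into lo_word from two
 * aligned loads; offsetlo + offsethi == 32 and both are non-zero. */
static uint32_t rebuild_misaligned_word(uint32_t lo_word, uint32_t hi_word,
                                        unsigned offsetlo, unsigned offsethi) {
    return (lo_word >> offsetlo) | (hi_word << offsethi);
}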
return_32:
setup_return w1=r3, w2=r2
b do_return
return_34:
setup_return w1=r3, w2=r4
b do_return
return_25:
setup_return w1=r2, w2=r5
b do_return
return_35:
setup_return w1=r3, w2=r5
b do_return
return_24:
setup_return w1=r2, w2=r4
do_return:
#ifdef __ARMEB__
mov r0, ip
#else /* not __ARMEB__ */
rev r0, ip
#endif /* not __ARMEB__ */
/* Restore temporaries early, before computing the return value. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
/* There is a zero or a different byte between r1 and r2. */
/* r0 contains a mask of all-zero bytes in r1. */
/* Using r0 and not ip here because cbz requires a low register. */
m_cbz reg=r0, label=compute_return_value
clz r0, r0
/* r0 contains the number of bits on the left of the first all-zero byte in r1. */
rsb r0, r0, #24
/* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
lsr r1, r1, r0
lsr r2, r2, r0
compute_return_value:
movs r0, #1
cmp r1, r2
/* The return value is computed as follows.
If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
which means r0:=r0-r0-1 and r0 is #-1 at return.
If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
which means r0:=r0-r0 and r0 is #0 at return.
(C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
it ls
sbcls r0, r0, r0
bx lr
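The MOVS/CMP/SBC LS sequence above is a branch-free three-way compare on the recovered bytes: it leaves 1 when r1 > r2, -1 when r1 < r2, and 0 when they are equal. A minimal C equivalent (name illustrative):

#include <stdint.h>

/* Branch-free three-way compare, as computed by movs/cmp/sbcls above. */
static int three_way_compare(uint32_t a, uint32_t b) {
    return (a > b) - (a < b);        /* 1, 0 or -1 */
}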
/* The code from the previous version of strcmp.S handles all of the
* cases where the first string and second string cannot both be
* aligned to a word boundary faster than the new algorithm. See
* bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S for the unedited
* version of the code.
*/
strcmp_unaligned:
wp1 .req r0
wp2 .req r1
b1 .req r2
w1 .req r4
w2 .req r5
t1 .req ip
@ r3 is scratch
2:
mov b1, #1
orr b1, b1, b1, lsl #8
orr b1, b1, b1, lsl #16
and t1, wp2, #3
bic wp2, wp2, #3
ldr w1, [wp1], #4
ldr w2, [wp2], #4
cmp t1, #2
beq 2f
bhi 3f
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
1:
bic t1, w1, #MSB
cmp t1, w2, S2LOMEM #8
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #24
bne 6f
ldr w1, [wp1], #4
b 1b
4:
S2LOMEM w2, w2, #8
b 8f
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #0xff000000
itt ne
tstne w1, #0x00ff0000
tstne w1, #0x0000ff00
beq 7f
#else
bics r3, r3, #0xff000000
bne 7f
#endif
ldrb w2, [wp2]
S2LOMEM t1, w1, #24
#ifdef __ARMEB__
lsl w2, w2, #24
#endif
b 8f
6:
S2LOMEM t1, w1, #24
and w2, w2, #LSB
b 8f
/* Critical inner Loop: Block with 2 bytes initial overlap */
.p2align 2
2:
S2HIMEM t1, w1, #16
sub r3, w1, b1
S2LOMEM t1, t1, #16
bic r3, r3, w1
cmp t1, w2, S2LOMEM #16
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #16
bne 6f
ldr w1, [wp1], #4
b 2b
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #0xff000000
it ne
tstne w1, #0x00ff0000
beq 7f
#else
lsls r3, r3, #16
bne 7f
#endif
ldrh w2, [wp2]
S2LOMEM t1, w1, #16
#ifdef __ARMEB__
lsl w2, w2, #16
#endif
b 8f
6:
S2HIMEM w2, w2, #16
S2LOMEM t1, w1, #16
4:
S2LOMEM w2, w2, #16
b 8f
/* Critical inner Loop: Block with 1 byte initial overlap */
.p2align 2
3:
and t1, w1, #LSB
cmp t1, w2, S2LOMEM #24
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #8
bne 6f
ldr w1, [wp1], #4
b 3b
4:
S2LOMEM w2, w2, #24
b 8f
5:
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #LSB
beq 7f
ldr w2, [wp2], #4
6:
S2LOMEM t1, w1, #8
bic w2, w2, #MSB
b 8f
7:
mov r0, #0
/* Restore registers and stack. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
bx lr
8:
and r2, t1, #LSB
and r0, w2, #LSB
cmp r0, #1
it cs
cmpcs r0, r2
itt eq
S2LOMEMEQ t1, t1, #8
S2LOMEMEQ w2, w2, #8
beq 8b
sub r0, r2, r0
/* Restore registers and stack. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
bx lr
END(strcmp)


@@ -0,0 +1,7 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a9/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a9/bionic/memset.S)
$(call libc-add-cpu-variant-src,STRCMP,arch-arm/cortex-a9/bionic/strcmp.S)
# Use cortex-a15 version of strlen.
$(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
include bionic/libc/arch-arm/generic/generic.mk


@@ -0,0 +1,380 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* Optimized memcpy() for ARM.
*
* note that memcpy() always returns the destination pointer,
* so we have to preserve R0.
*/
ENTRY(memcpy)
/* The stack must always be 64-bits aligned to be compliant with the
* ARM ABI. Since we have to save R0, we might as well save R4
* which we can use for better pipelining of the reads below
*/
.save {r0, r4, lr}
stmfd sp!, {r0, r4, lr}
/* Making room for r5-r11 which will be spilled later */
.pad #28
sub sp, sp, #28
// preload the destination because we'll align it to a cache line
// with small writes. Also start the source "pump".
PLD (r0, #0)
PLD (r1, #0)
PLD (r1, #32)
/* it simplifies things to take care of len<4 early */
cmp r2, #4
blo copy_last_3_and_return
/* compute the offset to align the source
* offset = (4-(src&3))&3 = -src & 3
*/
rsb r3, r1, #0
ands r3, r3, #3
beq src_aligned
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r12,[r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r12,[r0], #1
src_aligned:
/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne non_congruent
/* Use post-increment mode for stm to spill r5-r11 to the reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C
/* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
sub r2, r2, r3
congruent_aligned32:
/*
* here source is aligned to 32 bytes.
*/
cached_aligned32:
subs r2, r2, #32
blo less_than_32_left
/*
* We preload a cache-line up to 64 bytes ahead. On the 926, this will
* stall only until the requested word is fetched, but the linefill
* continues in the background.
* While the linefill is going, we write our previous cache-line
* into the write-buffer (which should have some free space).
* When the linefill is done, the writebuffer will
* start dumping its content into memory
*
* While all this is going, we then load a full cache line into
* 8 registers, this cache line should be in the cache by now
* (or partly in the cache).
*
* This code should work well regardless of the source/dest alignment.
*
*/
// Align the preload register to a cache-line because the cpu does
// "critical word first" (the first word requested is loaded first).
bic r12, r1, #0x1F
add r12, r12, #64
1: ldmia r1!, { r4-r11 }
PLD (r12, #64)
subs r2, r2, #32
// NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
// for ARM9 preload will not be safely guarded by the preceding subs.
// When it is safely guarded the only possibility to have SIGSEGV here
// is because the caller overstates the length.
ldrhi r3, [r12], #32 /* cheap ARM9 preload */
stmia r0!, { r4-r11 }
bhs 1b
add r2, r2, #32
less_than_32_left:
/*
* less than 32 bytes left at this point (length in r2)
*/
/* skip all this if there is nothing to do, which should
* be a common case (if not executed the code below takes
* about 16 cycles)
*/
tst r2, #0x1F
beq 1f
/* conditionally copies 0 to 31 bytes */
movs r12, r2, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/
non_congruent:
/*
* here source is aligned to 4 bytes
* but destination is not.
*
* in the code below r2 is the number of bytes read
* (the number of bytes written is always smaller, because we have
* partial words in the shift queue)
*/
cmp r2, #4
blo copy_last_3_and_return
/* Use post-increment mode for stm to spill r5-r11 to the reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* compute shifts needed to align src to dest */
rsb r5, r0, #0
and r5, r5, #3 /* r5 = # bytes in partial words */
mov r12, r5, lsl #3 /* r12 = right */
rsb lr, r12, #32 /* lr = left */
/* read the first word */
ldr r3, [r1], #4
sub r2, r2, #4
/* write a partial word (0 to 3 bytes), such that destination
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/
movs r5, r5, lsl #31
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
cmp r2, #4
blo partial_word_tail
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo partial_word_tail
/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo less_than_thirtytwo
/* Use immediate mode for the shifts, because there is an extra cycle
* for register shifts, which could account for up to 50% of
* performance hit.
*/
cmp r12, #24
beq loop24
cmp r12, #8
beq loop8
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r8, lsl #16
mov r8, r8, lsr #16
orr r8, r8, r9, lsl #16
mov r9, r9, lsr #16
orr r9, r9, r10, lsl #16
mov r10, r10, lsr #16
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
bhs 1b
b less_than_thirtytwo
loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, r8, lsl #24
mov r8, r8, lsr #8
orr r8, r8, r9, lsl #24
mov r9, r9, lsr #8
orr r9, r9, r10, lsl #24
mov r10, r10, lsr #8
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
bhs 1b
b less_than_thirtytwo
loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, r8, lsl #8
mov r8, r8, lsr #24
orr r8, r8, r9, lsl #8
mov r9, r9, lsr #24
orr r9, r9, r10, lsl #8
mov r10, r10, lsr #24
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
bhs 1b
less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo partial_word_tail
1: ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
ldrcsb r12,[r1]
strmib r2, [r0], #1
strcsb r3, [r0], #1
strcsb r12,[r0]
/* we're done! restore sp and spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
END(memcpy)
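The non_congruent path above keeps a partial word in a register and re-packs source words into aligned destination stores using a pair of complementary shifts (r12 = "right", lr = "left", right + left == 32). A minimal little-endian C sketch of that inner loop, with the head and tail byte handling omitted and names illustrative:

#include <stdint.h>
#include <stddef.h>

/* src is word-aligned; dst has already been advanced to a word boundary;
 * right is 8, 16 or 24 depending on the original misalignment. */
static void shifted_word_copy(uint32_t *dst, const uint32_t *src,
                              size_t nwords, unsigned right) {
    unsigned left = 32u - right;
    uint32_t carry = *src++ >> right;        /* partial word in the queue */
    for (size_t i = 0; i < nwords; i++) {
        uint32_t next = *src++;
        dst[i] = carry | (next << left);     /* orr r4, r3, r5, lsl lr */
        carry = next >> right;               /* mov r3, r5, lsr r12    */
    }
}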


@@ -0,0 +1,109 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
ENTRY(bzero)
mov r2, r1
mov r1, #0
END(bzero)
ENTRY(memset)
/* compute the offset to align the destination
* offset = (4-(dst&3))&3 = -dst & 3
*/
.save {r0, r4-r7, lr}
stmfd sp!, {r0, r4-r7, lr}
rsb r3, r0, #0
ands r3, r3, #3
cmp r3, r2
movhi r3, r2
/* splat r1 */
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
mov lr, r1
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
rsb r3, r0, #0
ands r3, r3, #0x1C
beq 3f
cmp r3, r2
andhi r3, r2, #0x1C
sub r2, r2, r3
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
3:
subs r2, r2, #32
mov r3, r1
bmi 2f
1: subs r2, r2, #32
stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
bhs 1b
2: add r2, r2, #32
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strmih r1, [r0], #2
movs r2, r2, lsl #2
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset)


@@ -0,0 +1,317 @@
/*
* Copyright (c) 2011 The Android Open Source Project
* Copyright (c) 2008 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
.text
#ifdef __ARMEB__
#define SHFT2LSB lsl
#define SHFT2LSBEQ lsleq
#define SHFT2MSB lsr
#define SHFT2MSBEQ lsreq
#define MSB 0x000000ff
#define LSB 0xff000000
#else
#define SHFT2LSB lsr
#define SHFT2LSBEQ lsreq
#define SHFT2MSB lsl
#define SHFT2MSBEQ lsleq
#define MSB 0xff000000
#define LSB 0x000000ff
#endif
#define magic1(REG) REG
#define magic2(REG) REG, lsl #7
ENTRY(strcmp)
PLD(r0, #0)
PLD(r1, #0)
eor r2, r0, r1
tst r2, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands r2, r0, #3
bic r0, r0, #3
bic r1, r1, #3
ldr ip, [r0], #4
it eq
ldreq r3, [r1], #4
beq 1f
/* Although s1 and s2 have identical initial alignment, they are
* not currently word aligned. Rather than comparing bytes,
* make sure that any bytes fetched from before the addressed
* bytes are forced to 0xff. Then they will always compare
* equal.
*/
eor r2, r2, #3
lsl r2, r2, #3
mvn r3, #MSB
SHFT2LSB r2, r3, r2
ldr r3, [r1], #4
orr ip, ip, r2
orr r3, r3, r2
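The orr above implements the trick described in the comment: the bytes loaded from before the real start of each string are forced to 0xff so that they always compare equal and can never look like a terminator. A minimal little-endian C sketch (offset is the common byte offset of the strings within a word, names illustrative):

#include <stdint.h>

/* Force the `offset` leading bytes of an aligned-down load to 0xff. */
static uint32_t mask_leading_bytes(uint32_t word, unsigned offset /* 1..3 */) {
    uint32_t mask = (1u << (8u * offset)) - 1u;  /* 0xff, 0xffff or 0xffffff */
    return word | mask;
}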
1:
/* Load the 'magic' constant 0x01010101. */
str r4, [sp, #-4]!
mov r4, #1
orr r4, r4, r4, lsl #8
orr r4, r4, r4, lsl #16
.p2align 2
4:
PLD(r0, #8)
PLD(r1, #8)
sub r2, ip, magic1(r4)
cmp ip, r3
itttt eq
/* check for any zero bytes in first word */
biceq r2, r2, ip
tsteq r2, magic2(r4)
ldreq ip, [r0], #4
ldreq r3, [r1], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
SHFT2MSB r0, ip, #24
SHFT2LSB ip, ip, #8
cmp r0, #1
it cs
cmpcs r0, r3, SHFT2MSB #24
it eq
SHFT2LSBEQ r3, r3, #8
beq 2b
/* On a big-endian machine, r0 contains the desired byte in bits
* 0-7; on a little-endian machine they are in bits 24-31. In
* both cases the other bits in r0 are all zero. For r3 the
* interesting byte is at the other end of the word, but the
* other bits are not necessarily zero. We need a signed result
* representing the difference in the unsigned bytes, so for the
* little-endian case we can't just shift the interesting bits up.
*/
#ifdef __ARMEB__
sub r0, r0, r3, lsr #24
#else
and r3, r3, #255
/* No RSB instruction in Thumb2 */
#ifdef __thumb2__
lsr r0, r0, #24
sub r0, r0, r3
#else
rsb r0, r3, r0, lsr #24
#endif
#endif
ldr r4, [sp], #4
bx lr
.Lstrcmp_unaligned:
wp1 .req r0
wp2 .req r1
b1 .req r2
w1 .req r4
w2 .req r5
t1 .req ip
@ r3 is scratch
/* First of all, compare bytes until wp1(sp1) is word-aligned. */
1:
tst wp1, #3
beq 2f
ldrb r2, [wp1], #1
ldrb r3, [wp2], #1
cmp r2, #1
it cs
cmpcs r2, r3
beq 1b
sub r0, r2, r3
bx lr
2:
str r5, [sp, #-4]!
str r4, [sp, #-4]!
mov b1, #1
orr b1, b1, b1, lsl #8
orr b1, b1, b1, lsl #16
and t1, wp2, #3
bic wp2, wp2, #3
ldr w1, [wp1], #4
ldr w2, [wp2], #4
cmp t1, #2
beq 2f
bhi 3f
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
1:
bic t1, w1, #MSB
cmp t1, w2, SHFT2LSB #8
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, SHFT2MSB #24
bne 6f
ldr w1, [wp1], #4
b 1b
4:
SHFT2LSB w2, w2, #8
b 8f
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #0xff000000
itt ne
tstne w1, #0x00ff0000
tstne w1, #0x0000ff00
beq 7f
#else
bics r3, r3, #0xff000000
bne 7f
#endif
ldrb w2, [wp2]
SHFT2LSB t1, w1, #24
#ifdef __ARMEB__
lsl w2, w2, #24
#endif
b 8f
6:
SHFT2LSB t1, w1, #24
and w2, w2, #LSB
b 8f
/* Critical inner Loop: Block with 2 bytes initial overlap */
.p2align 2
2:
SHFT2MSB t1, w1, #16
sub r3, w1, b1
SHFT2LSB t1, t1, #16
bic r3, r3, w1
cmp t1, w2, SHFT2LSB #16
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, SHFT2MSB #16
bne 6f
ldr w1, [wp1], #4
b 2b
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #0xff000000
it ne
tstne w1, #0x00ff0000
beq 7f
#else
lsls r3, r3, #16
bne 7f
#endif
ldrh w2, [wp2]
SHFT2LSB t1, w1, #16
#ifdef __ARMEB__
lsl w2, w2, #16
#endif
b 8f
6:
SHFT2MSB w2, w2, #16
SHFT2LSB t1, w1, #16
4:
SHFT2LSB w2, w2, #16
b 8f
/* Critical inner Loop: Block with 1 byte initial overlap */
.p2align 2
3:
and t1, w1, #LSB
cmp t1, w2, SHFT2LSB #24
sub r3, w1, b1
bic r3, r3, w1
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, SHFT2MSB #8
bne 6f
ldr w1, [wp1], #4
b 3b
4:
SHFT2LSB w2, w2, #24
b 8f
5:
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #LSB
beq 7f
ldr w2, [wp2], #4
6:
SHFT2LSB t1, w1, #8
bic w2, w2, #MSB
b 8f
7:
mov r0, #0
ldr r4, [sp], #4
ldr r5, [sp], #4
bx lr
8:
and r2, t1, #LSB
and r0, w2, #LSB
cmp r0, #1
it cs
cmpcs r0, r2
itt eq
SHFT2LSBEQ t1, t1, #8
SHFT2LSBEQ w2, w2, #8
beq 8b
sub r0, r2, r0
ldr r4, [sp], #4
ldr r5, [sp], #4
bx lr
END(strcmp)


@@ -33,16 +33,16 @@ size_t strlen(const char *s)
{
__builtin_prefetch(s);
__builtin_prefetch(s+32);
union {
const char *b;
const uint32_t *w;
uintptr_t i;
} u;
// these are some scratch variables for the asm code below
uint32_t v, t;
// initialize the string length to zero
size_t l = 0;
@@ -60,52 +60,60 @@ size_t strlen(const char *s)
// We need to process 32 bytes per loop to schedule PLD properly
// and achieve the maximum bus speed.
asm(
"ldr %[v], [ %[s] ], #4 \n"
"ldr %[v], [%[s]], #4 \n"
"sub %[l], %[l], %[s] \n"
"0: \n"
#if __ARM_HAVE_PLD
"pld [ %[s], #64 ] \n"
"pld [%[s], #64] \n"
#endif
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
#if !defined(__OPTIMIZE_SIZE__)
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
"bne 1f \n"
"sub %[t], %[v], %[mask], lsr #7\n"
"and %[t], %[t], %[mask] \n"
"bics %[t], %[t], %[v] \n"
"ldreq %[v], [ %[s] ], #4 \n"
"it eq \n"
"ldreq %[v], [%[s]], #4 \n"
#endif
"beq 0b \n"
"1: \n"
@@ -117,13 +125,14 @@ size_t strlen(const char *s)
"beq 2f \n"
"add %[l], %[l], #1 \n"
"tst %[v], #0xFF0000 \n"
"it ne \n"
"addne %[l], %[l], #1 \n"
"2: \n"
: [l]"=&r"(l), [v]"=&r"(v), [t]"=&r"(t), [s]"=&r"(u.b)
: "%[l]"(l), "%[s]"(u.b), [mask]"r"(0x80808080UL)
: "cc"
);
done:
return l;
}


@@ -0,0 +1,4 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/generic/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/generic/bionic/memset.S)
$(call libc-add-cpu-variant-src,STRCMP,arch-arm/generic/bionic/strcmp.S)
$(call libc-add-cpu-variant-src,STRLEN,arch-arm/generic/bionic/strlen.c)


@@ -0,0 +1,146 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Assumes neon instructions and a cache line size of 32 bytes. */
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that has a 32 byte
* cache line.
*/
.text
.fpu neon
#define CACHE_LINE_SIZE 32
ENTRY(memcpy)
.save {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE*0)]
stmfd sp!, {r0, lr}
pld [r1, #(CACHE_LINE_SIZE*2)]
/* do we have at least 16-bytes to copy (needed for alignment below) */
cmp r2, #16
blo 5f
/* align destination to cache-line for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 0f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
0: /* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*2)]
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* Preload all the cache lines we need.
* NOTE: The number of pld below depends on CACHE_LINE_SIZE,
* ideally we would increase the distance in the main loop to
* avoid the goofy code below. In practice this doesn't seem to make
* a big difference.
* NOTE: The value CACHE_LINE_SIZE * 8 was chosen through
* experimentation.
*/
pld [r1, #(CACHE_LINE_SIZE*4)]
pld [r1, #(CACHE_LINE_SIZE*6)]
pld [r1, #(CACHE_LINE_SIZE*8)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE*8)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
END(memcpy)


@@ -0,0 +1,81 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that supports
* unaligned neon instruction accesses to memory.
*/
.fpu neon
ENTRY(bzero)
mov r2, r1
mov r1, #0
END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
subs r2, r2, #32
blo 2f
vmov q1, q0
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)


@@ -0,0 +1,477 @@
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */
.syntax unified
#if defined (__thumb__)
.thumb
.thumb_func
#endif
ENTRY(strcmp)
/* Use LDRD whenever possible. */
/* The main thing to look out for when comparing large blocks is that
the loads do not cross a page boundary when loading past the index
of the byte with the first difference or the first string-terminator.
For example, if the strings are identical and the string-terminator
is at index k, byte by byte comparison will not load beyond address
s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
k; double word - up to 7 bytes. If the load of these bytes crosses
a page boundary, it might cause a memory fault (if the page is not mapped)
that would not have happened in byte by byte comparison.
If an address is (double) word aligned, then a load of a (double) word
from that address will not cross a page boundary.
Therefore, the algorithm below considers word and double-word alignment
of strings separately. */
/* High-level description of the algorithm.
* The fast path: if both strings are double-word aligned,
use LDRD to load two words from each string in every loop iteration.
* If the strings have the same offset from a word boundary,
use LDRB to load and compare byte by byte until
the first string is aligned to a word boundary (at most 3 bytes).
This is optimized for quick return on short unaligned strings.
* If the strings have the same offset from a double-word boundary,
use LDRD to load two words from each string in every loop iteration, as in the fast path.
* If the strings do not have the same offset from a double-word boundary,
load a word from the second string before the loop to initialize the queue.
Use LDRD to load two words from every string in every loop iteration.
Inside the loop, load the second word from the second string only after comparing
the first word, using the queued value, to guarantee safety across page boundaries.
* If the strings do not have the same offset from a word boundary,
use LDR and a shift queue. Order of loads and comparisons matters,
similarly to the previous case.
* Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
* The only difference between ARM and Thumb modes is the use of CBZ instruction.
* The only difference between big and little endian is the use of REV in little endian
to compute the return value, instead of MOV.
*/
.macro m_cbz reg label
#ifdef __thumb2__
cbz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
beq \label
#endif /* not defined __thumb2__ */
.endm /* m_cbz */
.macro m_cbnz reg label
#ifdef __thumb2__
cbnz \reg, \label
#else /* not defined __thumb2__ */
cmp \reg, #0
bne \label
#endif /* not defined __thumb2__ */
.endm /* m_cbnz */
.macro init
/* Macro to save temporary registers and prepare magic values. */
subs sp, sp, #16
strd r4, r5, [sp, #8]
strd r6, r7, [sp]
mvn r6, #0 /* all F */
mov r7, #0 /* all 0 */
.endm /* init */
.macro magic_compare_and_branch w1 w2 label
/* Macro to compare registers w1 and w2 and conditionally branch to label. */
cmp \w1, \w2 /* Are w1 and w2 the same? */
magic_find_zero_bytes \w1
it eq
cmpeq ip, #0 /* Is there a zero byte in w1? */
bne \label
.endm /* magic_compare_and_branch */
.macro magic_find_zero_bytes w1
/* Macro to find all-zero bytes in w1, result is in ip. */
#if (defined (__ARM_FEATURE_DSP))
uadd8 ip, \w1, r6
sel ip, r7, r6
#else /* not defined (__ARM_FEATURE_DSP) */
/* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
Coincidentally, these processors only have Thumb-2 mode, where we can use
the (large) magic constant available directly as an immediate in instructions.
Note that we cannot use the magic constant in ARM mode, where we need
to create the constant in a register. */
sub ip, \w1, #0x01010101
bic ip, ip, \w1
and ip, ip, #0x80808080
#endif /* not defined (__ARM_FEATURE_DSP) */
.endm /* magic_find_zero_bytes */
.macro setup_return w1 w2
#ifdef __ARMEB__
mov r1, \w1
mov r2, \w2
#else /* not __ARMEB__ */
rev r1, \w1
rev r2, \w2
#endif /* not __ARMEB__ */
.endm /* setup_return */
pld [r0, #0]
pld [r1, #0]
/* Are both strings double-word aligned? */
orr ip, r0, r1
tst ip, #7
bne do_align
/* Fast path. */
init
doubleword_aligned:
/* Get here when the strings to compare are double-word aligned. */
/* Compare two words in every iteration. */
.p2align 2
2:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string. */
ldrd r2, r3, [r0], #8
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r2, w2=r4, label=return_24
magic_compare_and_branch w1=r3, w2=r5, label=return_35
b 2b
do_align:
/* Is the first string word-aligned? */
ands ip, r0, #3
beq word_aligned_r0
/* Fast compare byte by byte until the first string is word-aligned. */
/* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
to read until the next word boundary is 4-ip. */
bic r0, r0, #3
ldr r2, [r0], #4
lsls ip, ip, #31
beq byte2
bcs byte3
byte1:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE1_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte2:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE2_OFFSET
subs ip, r3, ip
bne fast_return
m_cbz reg=r3, label=fast_return
byte3:
ldrb ip, [r1], #1
uxtb r3, r2, ror #BYTE3_OFFSET
subs ip, r3, ip
bne fast_return
m_cbnz reg=r3, label=word_aligned_r0
fast_return:
mov r0, ip
bx lr
word_aligned_r0:
init
/* The first string is word-aligned. */
/* Is the second string word-aligned? */
ands ip, r1, #3
bne strcmp_unaligned
word_aligned:
/* The strings are word-aligned. */
/* Is the first string double-word aligned? */
tst r0, #4
beq doubleword_aligned_r0
/* If r0 is not double-word aligned yet, align it by loading
and comparing the next word from each string. */
ldr r2, [r0], #4
ldr r4, [r1], #4
magic_compare_and_branch w1=r2 w2=r4 label=return_24
doubleword_aligned_r0:
/* Get here when r0 is double-word aligned. */
/* Is r1 doubleword_aligned? */
tst r1, #4
beq doubleword_aligned
/* Get here when the strings to compare are word-aligned,
r0 is double-word aligned, but r1 is not double-word aligned. */
/* Initialize the queue. */
ldr r5, [r1], #4
/* Compare two words in every iteration. */
.p2align 2
3:
pld [r0, #16]
pld [r1, #16]
/* Load the next double-word from each string and compare. */
ldrd r2, r3, [r0], #8
magic_compare_and_branch w1=r2 w2=r5 label=return_25
ldrd r4, r5, [r1], #8
magic_compare_and_branch w1=r3 w2=r4 label=return_34
b 3b
.macro miscmp_word offsetlo offsethi
/* Macro to compare misaligned strings. */
/* r0, r1 are word-aligned, and at least one of the strings
is not double-word aligned. */
/* Compare one word in every loop iteration. */
/* OFFSETLO is the original bit-offset of r1 from a word-boundary,
OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
/* Initialize the shift queue. */
ldr r5, [r1], #4
/* Compare one word from each string in every loop iteration. */
.p2align 2
7:
ldr r3, [r0], #4
S2LOMEM r5, r5, #\offsetlo
magic_find_zero_bytes w1=r3
cmp r7, ip, S2HIMEM #\offsetlo
and r2, r3, r6, S2LOMEM #\offsetlo
it eq
cmpeq r2, r5
bne return_25
ldr r5, [r1], #4
cmp ip, #0
eor r3, r2, r3
S2HIMEM r2, r5, #\offsethi
it eq
cmpeq r3, r2
bne return_32
b 7b
.endm /* miscmp_word */
strcmp_unaligned:
/* r0 is word-aligned, r1 is at offset ip from a word. */
/* Align r1 to the (previous) word-boundary. */
bic r1, r1, #3
/* Unaligned comparison word by word using LDRs. */
cmp ip, #2
beq miscmp_word_16 /* If ip == 2. */
bge miscmp_word_24 /* If ip == 3. */
miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
return_32:
setup_return w1=r3, w2=r2
b do_return
return_34:
setup_return w1=r3, w2=r4
b do_return
return_25:
setup_return w1=r2, w2=r5
b do_return
return_35:
setup_return w1=r3, w2=r5
b do_return
return_24:
setup_return w1=r2, w2=r4
do_return:
#ifdef __ARMEB__
mov r0, ip
#else /* not __ARMEB__ */
rev r0, ip
#endif /* not __ARMEB__ */
/* Restore temporaries early, before computing the return value. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
/* There is a zero or a different byte between r1 and r2. */
/* r0 contains a mask of all-zero bytes in r1. */
/* Using r0 and not ip here because cbz requires a low register. */
m_cbz reg=r0, label=compute_return_value
clz r0, r0
/* r0 contains the number of bits on the left of the first all-zero byte in r1. */
rsb r0, r0, #24
/* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
lsr r1, r1, r0
lsr r2, r2, r0
compute_return_value:
movs r0, #1
cmp r1, r2
/* The return value is computed as follows.
If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
which means r0:=r0-r0-1 and r0 is #-1 at return.
If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
which means r0:=r0-r0 and r0 is #0 at return.
(C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
it ls
sbcls r0, r0, r0
bx lr
/* The code from the previous version of strcmp.S handles this
* particular case (the second string is 2 bytes off a word alignment)
* faster than any current version. In this very specific case, use the
* previous version. See bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S
* for the unedited version of this code.
*/
miscmp_word_16:
wp1 .req r0
wp2 .req r1
b1 .req r2
w1 .req r4
w2 .req r5
t1 .req ip
@ r3 is scratch
/* At this point, wp1 (r0) has already been word-aligned. */
2:
mov b1, #1
orr b1, b1, b1, lsl #8
orr b1, b1, b1, lsl #16
and t1, wp2, #3
bic wp2, wp2, #3
ldr w1, [wp1], #4
ldr w2, [wp2], #4
/* Critical inner Loop: Block with 2 bytes initial overlap */
.p2align 2
2:
S2HIMEM t1, w1, #16
sub r3, w1, b1
S2LOMEM t1, t1, #16
bic r3, r3, w1
cmp t1, w2, S2LOMEM #16
bne 4f
ands r3, r3, b1, lsl #7
it eq
ldreq w2, [wp2], #4
bne 5f
eor t1, t1, w1
cmp t1, w2, S2HIMEM #16
bne 6f
ldr w1, [wp1], #4
b 2b
5:
#ifdef __ARMEB__
/* The syndrome value may contain false ones if the string ends
* with the bytes 0x01 0x00
*/
tst w1, #0xff000000
it ne
tstne w1, #0x00ff0000
beq 7f
#else
lsls r3, r3, #16
bne 7f
#endif
ldrh w2, [wp2]
S2LOMEM t1, w1, #16
#ifdef __ARMEB__
lsl w2, w2, #16
#endif
b 8f
6:
S2HIMEM w2, w2, #16
S2LOMEM t1, w1, #16
4:
S2LOMEM w2, w2, #16
b 8f
7:
mov r0, #0
/* Restore registers and stack. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
bx lr
8:
and r2, t1, #LSB
and r0, w2, #LSB
cmp r0, #1
it cs
cmpcs r0, r2
itt eq
S2LOMEMEQ t1, t1, #8
S2LOMEMEQ w2, w2, #8
beq 8b
sub r0, r2, r0
/* Restore registers and stack. */
ldrd r6, r7, [sp]
ldrd r4, r5, [sp, #8]
adds sp, sp, #16
bx lr
END(strcmp)
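
Aside: the comment block above do_return is the heart of this routine. Once a difference or a NUL has been spotted, both words are byte-reversed so that the earlier string byte becomes the most significant one, everything after the first NUL is shifted away, and a single CMP plus a conditional SBC turns the unsigned comparison into -1/0/+1. The C sketch below only illustrates that idea and is not part of the patch: the classic subtract-and-mask idiom stands in for magic_find_zero_bytes, and the helper names are invented.

#include <stdint.h>

/* Sets an 0x80 flag at (at least) the lowest zero byte of w; higher flags can
   be false positives, but only the first zero byte matters below. */
static uint32_t zero_byte_flags(uint32_t w)
{
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

/* Three-way comparison of two 4-byte chunks loaded little-endian,
   mirroring the setup_return/do_return path above. */
static int compare_word_chunks(uint32_t w1, uint32_t w2)
{
    uint32_t flags = __builtin_bswap32(zero_byte_flags(w1));
    w1 = __builtin_bswap32(w1);                  /* rev r1, \w1 */
    w2 = __builtin_bswap32(w2);                  /* rev r2, \w2 */
    if (flags != 0) {
        int shift = 24 - __builtin_clz(flags);   /* clz r0, r0; rsb r0, r0, #24 */
        w1 >>= shift;                            /* lsr r1, r1, r0 */
        w2 >>= shift;                            /* lsr r2, r2, r0 */
    }
    return (w1 > w2) - (w1 < w2);                /* movs; cmp; it ls; sbcls */
}

For example, compare_word_chunks(0x00636261u /* "abc" */, 0x64636261u /* "abcd" */) returns a negative value, just as strcmp("abc", "abcd") does.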


@@ -0,0 +1,7 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/krait/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/krait/bionic/memset.S)
$(call libc-add-cpu-variant-src,STRCMP,arch-arm/krait/bionic/strcmp.S)
# Use cortex-a15 version of strlen.
$(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
include bionic/libc/arch-arm/generic/generic.mk


@@ -4,6 +4,7 @@ syscall_src += arch-arm/syscalls/_exit.S
syscall_src += arch-arm/syscalls/_exit_thread.S
syscall_src += arch-arm/syscalls/__fork.S
syscall_src += arch-arm/syscalls/__waitid.S
syscall_src += arch-arm/syscalls/wait4.S
syscall_src += arch-arm/syscalls/__sys_clone.S
syscall_src += arch-arm/syscalls/execve.S
syscall_src += arch-arm/syscalls/__setuid.S
@@ -125,6 +126,8 @@ syscall_src += arch-arm/syscalls/removexattr.S
syscall_src += arch-arm/syscalls/lremovexattr.S
syscall_src += arch-arm/syscalls/__statfs64.S
syscall_src += arch-arm/syscalls/unshare.S
syscall_src += arch-arm/syscalls/swapon.S
syscall_src += arch-arm/syscalls/swapoff.S
syscall_src += arch-arm/syscalls/pause.S
syscall_src += arch-arm/syscalls/gettimeofday.S
syscall_src += arch-arm/syscalls/settimeofday.S
@@ -143,6 +146,9 @@ syscall_src += arch-arm/syscalls/__timer_getoverrun.S
syscall_src += arch-arm/syscalls/__timer_delete.S
syscall_src += arch-arm/syscalls/utimes.S
syscall_src += arch-arm/syscalls/utimensat.S
syscall_src += arch-arm/syscalls/timerfd_create.S
syscall_src += arch-arm/syscalls/timerfd_settime.S
syscall_src += arch-arm/syscalls/timerfd_gettime.S
syscall_src += arch-arm/syscalls/sigaction.S
syscall_src += arch-arm/syscalls/sigprocmask.S
syscall_src += arch-arm/syscalls/__sigsuspend.S
@@ -180,7 +186,6 @@ syscall_src += arch-arm/syscalls/__getcpu.S
syscall_src += arch-arm/syscalls/ioprio_set.S
syscall_src += arch-arm/syscalls/ioprio_get.S
syscall_src += arch-arm/syscalls/uname.S
syscall_src += arch-arm/syscalls/__wait4.S
syscall_src += arch-arm/syscalls/umask.S
syscall_src += arch-arm/syscalls/__reboot.S
syscall_src += arch-arm/syscalls/__syslog.S


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__brk)
mov ip, r7
ldr r7, =__NR_brk
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__brk)
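
Aside: every stub in this series swaps the old three-instruction error path (movs r0, r0 / bxpl lr / b __set_syscall_errno) for the Linux convention that a raw kernel return in [-MAX_ERRNO, -1] is a negated errno code and anything else is the real result. A minimal C sketch of the new epilogue follows; it assumes MAX_ERRNO is 4095 as in <linux/err.h>, and the helper is a stand-in rather than bionic's actual __set_errno.

#include <errno.h>

#define MAX_ERRNO 4095                  /* assumed value, as in <linux/err.h> */

/* Stand-in for the __set_errno that the stubs branch to. */
static long set_errno_sketch(int n)
{
    errno = n;
    return -1;
}

/* What "cmn r0, #(MAX_ERRNO + 1); bxls lr; neg r0, r0; b __set_errno" does. */
static long syscall_return(long raw)    /* raw = value the kernel left in r0 */
{
    if ((unsigned long)raw < (unsigned long)-MAX_ERRNO)
        return raw;                     /* outside [-MAX_ERRNO, -1]: success */
    return set_errno_sketch((int)-raw); /* error: store errno, report -1 */
}

Unlike the old sequence, which treated any negative r0 as an error, the new check only flags the errno range, so results with the high bit set (a large mmap2 address, for instance) are not misreported as failures.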


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__fcntl)
mov ip, r7
ldr r7, =__NR_fcntl
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__fcntl)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__fcntl64)
mov ip, r7
ldr r7, =__NR_fcntl64
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__fcntl64)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__fork)
mov ip, r7
ldr r7, =__NR_fork
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__fork)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__fstatfs64)
mov ip, r7
ldr r7, =__NR_fstatfs64
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__fstatfs64)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__getcpu)
mov ip, r7
ldr r7, =__NR_getcpu
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__getcpu)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__getcwd)
mov ip, r7
ldr r7, =__NR_getcwd
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__getcwd)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__getpriority)
mov ip, r7
ldr r7, =__NR_getpriority
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__getpriority)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__ioctl)
mov ip, r7
ldr r7, =__NR_ioctl
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__ioctl)


@@ -1,6 +1,7 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__llseek)
mov ip, sp
@@ -10,7 +11,8 @@ ENTRY(__llseek)
ldr r7, =__NR__llseek
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__llseek)


@@ -1,6 +1,7 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__mmap2)
mov ip, sp
@@ -10,7 +11,8 @@ ENTRY(__mmap2)
ldr r7, =__NR_mmap2
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__mmap2)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__open)
mov ip, r7
ldr r7, =__NR_open
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__open)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__openat)
mov ip, r7
ldr r7, =__NR_openat
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__openat)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__ptrace)
mov ip, r7
ldr r7, =__NR_ptrace
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__ptrace)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__reboot)
mov ip, r7
ldr r7, =__NR_reboot
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__reboot)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__rt_sigaction)
mov ip, r7
ldr r7, =__NR_rt_sigaction
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__rt_sigaction)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__rt_sigprocmask)
mov ip, r7
ldr r7, =__NR_rt_sigprocmask
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__rt_sigprocmask)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__rt_sigtimedwait)
mov ip, r7
ldr r7, =__NR_rt_sigtimedwait
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__rt_sigtimedwait)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__sched_getaffinity)
mov ip, r7
ldr r7, =__NR_sched_getaffinity
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__sched_getaffinity)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__set_tls)
mov ip, r7
ldr r7, =__NR_ARM_set_tls
ldr r7, =__ARM_NR_set_tls
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__set_tls)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__setresuid)
mov ip, r7
ldr r7, =__NR_setresuid32
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__setresuid)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__setreuid)
mov ip, r7
ldr r7, =__NR_setreuid32
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__setreuid)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__setuid)
mov ip, r7
ldr r7, =__NR_setuid32
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__setuid)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__sigsuspend)
mov ip, r7
ldr r7, =__NR_sigsuspend
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__sigsuspend)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__statfs64)
mov ip, r7
ldr r7, =__NR_statfs64
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__statfs64)


@@ -1,6 +1,7 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__sys_clone)
mov ip, sp
@@ -10,7 +11,8 @@ ENTRY(__sys_clone)
ldr r7, =__NR_clone
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__sys_clone)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__syslog)
mov ip, r7
ldr r7, =__NR_syslog
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__syslog)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__timer_create)
mov ip, r7
ldr r7, =__NR_timer_create
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__timer_create)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__timer_delete)
mov ip, r7
ldr r7, =__NR_timer_delete
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__timer_delete)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__timer_getoverrun)
mov ip, r7
ldr r7, =__NR_timer_getoverrun
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__timer_getoverrun)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__timer_gettime)
mov ip, r7
ldr r7, =__NR_timer_gettime
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__timer_gettime)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__timer_settime)
mov ip, r7
ldr r7, =__NR_timer_settime
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__timer_settime)


@@ -1,13 +0,0 @@
/* autogenerated by gensyscalls.py */
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__wait4)
mov ip, r7
ldr r7, =__NR_wait4
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
END(__wait4)


@@ -1,6 +1,7 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(__waitid)
mov ip, sp
@@ -10,7 +11,8 @@ ENTRY(__waitid)
ldr r7, =__NR_waitid
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(__waitid)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(_exit)
mov ip, r7
ldr r7, =__NR_exit_group
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(_exit)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(_exit_thread)
mov ip, r7
ldr r7, =__NR_exit
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(_exit_thread)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(accept)
mov ip, r7
ldr r7, =__NR_accept
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(accept)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(access)
mov ip, r7
ldr r7, =__NR_access
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(access)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(acct)
mov ip, r7
ldr r7, =__NR_acct
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(acct)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(bind)
mov ip, r7
ldr r7, =__NR_bind
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(bind)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(cacheflush)
mov ip, r7
ldr r7, =__NR_ARM_cacheflush
ldr r7, =__ARM_NR_cacheflush
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(cacheflush)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(capget)
mov ip, r7
ldr r7, =__NR_capget
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(capget)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(capset)
mov ip, r7
ldr r7, =__NR_capset
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(capset)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(chdir)
mov ip, r7
ldr r7, =__NR_chdir
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(chdir)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(chmod)
mov ip, r7
ldr r7, =__NR_chmod
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(chmod)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(chown)
mov ip, r7
ldr r7, =__NR_chown32
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(chown)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(chroot)
mov ip, r7
ldr r7, =__NR_chroot
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(chroot)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(clock_getres)
mov ip, r7
ldr r7, =__NR_clock_getres
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(clock_getres)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(clock_gettime)
mov ip, r7
ldr r7, =__NR_clock_gettime
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(clock_gettime)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(clock_nanosleep)
mov ip, r7
ldr r7, =__NR_clock_nanosleep
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(clock_nanosleep)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(clock_settime)
mov ip, r7
ldr r7, =__NR_clock_settime
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(clock_settime)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(close)
mov ip, r7
ldr r7, =__NR_close
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(close)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(connect)
mov ip, r7
ldr r7, =__NR_connect
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(connect)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(delete_module)
mov ip, r7
ldr r7, =__NR_delete_module
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(delete_module)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(dup)
mov ip, r7
ldr r7, =__NR_dup
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(dup)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(dup2)
mov ip, r7
ldr r7, =__NR_dup2
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(dup2)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(epoll_create)
mov ip, r7
ldr r7, =__NR_epoll_create
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(epoll_create)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(epoll_ctl)
mov ip, r7
ldr r7, =__NR_epoll_ctl
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(epoll_ctl)


@@ -1,13 +1,15 @@
/* autogenerated by gensyscalls.py */
#include <asm/unistd.h>
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(epoll_wait)
mov ip, r7
ldr r7, =__NR_epoll_wait
swi #0
mov r7, ip
movs r0, r0
bxpl lr
b __set_syscall_errno
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno
END(epoll_wait)

Some files were not shown because too many files have changed in this diff.