Compare commits

..

28 Commits

Author SHA1 Message Date
Colin Cross
635ff5fdac Merge "Add more of the bionic architecture logic" into master-soong 2015-07-10 01:03:44 +00:00
Colin Cross
a56904273c Merge "Add version_script to bionic Blueprints" into master-soong 2015-07-10 01:00:49 +00:00
Colin Cross
f93bddbbd8 Merge "Add translation directives to libc Android.bp" into master-soong 2015-07-10 01:00:41 +00:00
Colin Cross
2803a15d4c Add more of the bionic architecture logic
Change-Id: I8aecabdc7219ad9e9affab1a3e67164352858bf9
2015-07-09 17:42:58 -07:00
Colin Cross
70f11bd69b Add version_script to bionic Blueprints
Change-Id: Ieddd80fa599c21414608e1d1c20ebaa2507a66ef
2015-07-09 17:35:15 -07:00
Colin Cross
8f7a4a3cb5 Add translation directives to libc Android.bp
Add translation directives for the crt*.o files instead of trying
to handle them in the translator.

Change-Id: I44a491f1823f483d9c40368da35d4e0cf16030f2
2015-07-09 17:35:07 -07:00
Dan Willemsen
649a2ea0e2 Merge "Switch libm from thumb to arm" into master-soong 2015-07-09 05:31:41 +00:00
Dan Willemsen
c371306714 Switch libm from thumb to arm
To match the Android.mk file

Change-Id: I02cb5f4b140c03bc8630879f005e027426a5dd99
2015-07-08 19:12:09 -07:00
Dan Willemsen
87e33892fc Merge "Use exclude_srcs instead of "-file"" into master-soong 2015-07-06 19:52:39 +00:00
Dan Willemsen
7454daa97f Use exclude_srcs instead of "-file"
Change-Id: Ie07c5901233d429102f9b6afcef12ea8c4bdda2c
2015-07-01 14:00:21 -07:00
Colin Cross
dab6ead2aa Rename Blueprints to Android.bp
Rename module definition files to Android.bp to avoid conflicts
with another project called Blueprint.

Change-Id: I69cfe9649fe35735dade6416d15b171a5bb2e283
2015-05-20 13:11:07 -07:00
Colin Cross
7d0b7b4ba2 Merge "Remove nonexistent include path" into master-soong 2015-05-12 19:31:16 +00:00
Colin Cross
98f4e07237 Remove nonexistent include path
upstream-freebsd/lib/libc/include doesn't exist, remove it from the
include path.

Change-Id: I0492784db5dc45e4a9a937956d095a147a08e835
2015-05-12 11:35:48 -07:00
Colin Cross
b624072cbd Merge "Export libbenchmark include dir" into master-soong 2015-05-08 00:06:32 +00:00
Colin Cross
0ecd342743 Export libbenchmark include dir
Export the libbenchmark include directory so the build system
doesn't have to manually add it for cc_benchmark modules.

Change-Id: I918a2fa5fb3104f4c2d86930ed2b9c5e00820ec6
2015-05-07 15:51:37 -07:00
Colin Cross
faa14d4de8 Merge "Fix Blueprints for building on Darwin" into master-soong 2015-05-07 21:31:49 +00:00
Colin Cross
abc97e2e71 Fix Blueprints for building on Darwin
Change-Id: I252e1b8a9ace397609f056f69aff83331b92aab7
2015-05-05 16:52:38 -07:00
Colin Cross
6549fe249c Update Blueprints files for AOSP changes
Change-Id: I915fc1e00b6e6eb1d6c08233893517b1d56c74fa
2015-04-29 11:34:24 -07:00
Colin Cross
c15e8fdb8d Merge remote-tracking branch 'aosp/master' into aosp 2015-04-29 11:29:05 -07:00
Colin Cross
90d6279802 Merge "Add Blueprints files for remaining bionic modules" into master-soong 2015-03-28 01:15:36 +00:00
Colin Cross
22d8776587 Add Blueprints files for remaining bionic modules
Change-Id: Ic9440fddb44ca1f17aad5b249535d7b96dd8d690
2015-03-27 11:14:39 -07:00
Colin Cross
51b8912253 Merge "Merge remote-tracking branch 'aosp/master' into HEAD" into master-soong 2015-03-17 19:31:44 +00:00
Colin Cross
270f2ea800 Merge remote-tracking branch 'aosp/master' into HEAD
Change-Id: Ia313444a62bcdeb676185b56ce730d0f997c8226
2015-03-17 12:30:41 -07:00
Colin Cross
7357ad0875 Merge "Update bionic Blueprints to match latest AOSP master" into master-soong 2015-03-17 00:44:29 +00:00
Colin Cross
959bc099a3 Merge remote-tracking branch 'aosp/master' into HEAD 2015-03-16 16:54:58 -07:00
Colin Cross
68a3b658b1 Update bionic Blueprints to match latest AOSP master
Change-Id: I90410ec60acfc3dcbdbcd0be6f283a90f4395643
2015-03-16 16:31:26 -07:00
Colin Cross
062d498e28 Merge "Initial bionic Blueprints files" into master-soong 2015-03-14 06:41:45 +00:00
Colin Cross
d2b8741e1b Initial bionic Blueprints files
Change-Id: Iafe8e84e0dc62e7d7c830e2c272ec92abdf6a801
2015-03-10 14:11:55 -07:00
501 changed files with 12239 additions and 37020 deletions

View File

@ -1,15 +0,0 @@
BasedOnStyle: Google
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
ColumnLimit: 100
CommentPragmas: NOLINT:.*
DerivePointerAlignment: false
IndentWidth: 2
ContinuationIndentWidth: 2
PointerAlignment: Left
TabWidth: 2
UseTab: Never
PenaltyExcessCharacter: 32
Cpp11BracedListStyle: false

View File

@ -169,10 +169,9 @@ As mentioned above, this is currently a two-step process:
Updating tzdata Updating tzdata
--------------- ---------------
This is fully automated (and these days handled by the libcore team, because This is fully automated:
they own icu, and that needs to be updated in sync with bionic):
1. Run update-tzdata.py in external/icu/tools/. 1. Run update-tzdata.py.
Verifying changes Verifying changes
@ -195,15 +194,14 @@ The tests are all built from the tests/ directory.
### Device tests ### Device tests
$ mma $ mma
$ adb remount
$ adb sync $ adb sync
$ adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests32 $ adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests32
$ adb shell \ $ adb shell \
/data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static32 /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static32
# Only for 64-bit targets # Only for 64-bit targets
$ adb shell /data/nativetest64/bionic-unit-tests/bionic-unit-tests64 $ adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests64
$ adb shell \ $ adb shell \
/data/nativetest64/bionic-unit-tests-static/bionic-unit-tests-static64 /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static64
### Host tests ### Host tests
@ -258,33 +256,18 @@ First, build and run the host tests as usual (see above).
The coverage report is now available at `covreport/index.html`. The coverage report is now available at `covreport/index.html`.
Attaching GDB to the tests LP32 ABI bugs
-------------------------- -------------
Bionic's test runner will run each test in its own process by default to prevent
tests failures from impacting other tests. This also has the added benefit of
running them in parallel, so they are much faster.
However, this also makes it difficult to run the tests under GDB. To prevent
each test from being forked, run the tests with the flag `--no-isolate`.
32-bit ABI bugs
---------------
This probably belongs in the NDK documentation rather than here, but these This probably belongs in the NDK documentation rather than here, but these
are the known ABI bugs in the 32-bit ABI: are the known ABI bugs in LP32:
* `time_t` is 32-bit. <http://b/5819737>. In the 64-bit ABI, time_t is * `time_t` is 32-bit. <http://b/5819737>
64-bit.
* `off_t` is 32-bit. There is `off64_t`, and in newer releases there is * `off_t` is 32-bit. There is `off64_t`, but no `_FILE_OFFSET_BITS` support.
almost-complete support for `_FILE_OFFSET_BITS`. Unfortunately our stdio Many of the `off64_t` functions are missing in older releases, and
implementation uses 32-bit offsets and -- worse -- function pointers to stdio uses 32-bit offsets, so there's no way to fully implement
functions that use 32-bit offsets, so there's no good way to implement `_FILE_OFFSET_BITS`.
the last few pieces <http://b/24807045>. In the 64-bit ABI, off_t is
off64_t.
* `sigset_t` is too small on ARM and x86 (but correct on MIPS), so support * `sigset_t` is too small on ARM and x86 (but correct on MIPS), so support
for real-time signals is broken. <http://b/5828899> In the 64-bit ABI, for real-time signals is broken. <http://b/5828899>
`sigset_t` is the correct size for every architecture.

123
benchmarks/Android.bp Normal file
View File

@ -0,0 +1,123 @@
//
// Copyright (C) 2013 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// Benchmarks library, usable by projects outside this directory.
// -----------------------------------------------------------------------------
// Compiler flags shared by libbenchmark and every benchmark binary below.
// -fno-builtin keeps the compiler from substituting its own builtin
// expansions for the libc calls being measured.
benchmark_cflags = [
"-O2",
"-fno-builtin",
"-Wall",
"-Wextra",
"-Werror",
"-Wunused",
]
// All benchmark code is built as C++11 (GNU dialect).
benchmark_cppflags = ["-std=gnu++11"]
// Sources for the reusable benchmark-runner library (libbenchmark).
benchmarklib_src_files = [
"Benchmark.cpp",
"utils.cpp",
"main.cpp",
]
// Static benchmark-runner library. export_include_dirs publishes this
// directory's headers so users of the library don't have to add the
// include path themselves.
cc_library_static {
name: "libbenchmark",
host_supported: true,
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
srcs: benchmarklib_src_files,
static_libs: ["libbase"],
export_include_dirs: ["."],
target: {
darwin: {
// Only supported on linux systems, so the Darwin host
// variant is disabled.
disabled: true,
},
},
}
// -----------------------------------------------------------------------------
// Benchmarks.
// -----------------------------------------------------------------------------
// Sources for the individual bionic benchmarks, one file per API area
// (math, system properties, pthreads, semaphores, stdio, string, time,
// unistd).
benchmark_src_files = [
"math_benchmark.cpp",
"property_benchmark.cpp",
"pthread_benchmark.cpp",
"semaphore_benchmark.cpp",
"stdio_benchmark.cpp",
"string_benchmark.cpp",
"time_benchmark.cpp",
"unistd_benchmark.cpp",
]
// Build benchmarks for the device (with bionic's .so). Run with:
//   adb shell bionic-benchmarks32
//   adb shell bionic-benchmarks64
cc_binary {
name: "bionic-benchmarks",
// Build both 32- and 64-bit variants; the suffix distinguishes the
// two installed binaries.
multilib: {
lib32: {
suffix: "32",
},
lib64: {
suffix: "64",
},
},
compile_multilib: "both",
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
srcs: benchmark_src_files,
// The runner library and libbase are linked statically.
static_libs: [
"libbenchmark",
"libbase",
],
}
// We don't build a static benchmark executable because it's not usually
// useful. If you're trying to run the current benchmarks on an older
// release, it's (so far at least) always because you want to measure the
// performance of the old release's libc, and a static benchmark isn't
// going to let you do that.
// Build benchmarks for the host (against glibc!). Run with:
//   bionic-benchmarks-glibc32
//   bionic-benchmarks-glibc64
cc_binary_host {
name: "bionic-benchmarks-glibc",
// Host binaries get distinct names (stems) per bitness rather than a
// suffix on a shared name.
multilib: {
lib32: {
stem: "bionic-benchmarks-glibc32",
},
lib64: {
stem: "bionic-benchmarks-glibc64",
},
},
compile_multilib: "both",
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
// NOTE(review): -lrt is presumably for the POSIX clock/timer functions
// that older glibc keeps in librt — confirm against the benchmark code.
ldflags: ["-lrt"],
srcs: benchmark_src_files,
static_libs: [
"libbenchmark",
"libbase",
],
target: {
darwin: {
// Only supported on linux systems.
disabled: true,
},
},
}

View File

@ -29,6 +29,7 @@ benchmark_cflags := \
-Wunused \ -Wunused \
benchmark_cppflags := \ benchmark_cppflags := \
-std=gnu++11 \
benchmarklib_src_files := \ benchmarklib_src_files := \
Benchmark.cpp \ Benchmark.cpp \

View File

@ -24,7 +24,7 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include <android-base/stringprintf.h> #include <base/stringprintf.h>
#include <benchmark/Benchmark.h> #include <benchmark/Benchmark.h>

View File

@ -65,50 +65,6 @@ void BM_math_logb::Run(int iters) {
StopBenchmarkTiming(); StopBenchmarkTiming();
} }
BENCHMARK_WITH_ARG(BM_math_isfinite_macro, double)->AT_COMMON_VALS;
void BM_math_isfinite_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += isfinite(v);
}
StopBenchmarkTiming();
}
#if defined(__BIONIC__)
#define test_isfinite __isfinite
#else
#define test_isfinite __finite
#endif
BENCHMARK_WITH_ARG(BM_math_isfinite, double)->AT_COMMON_VALS;
void BM_math_isfinite::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += test_isfinite(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_isinf_macro, double)->AT_COMMON_VALS;
void BM_math_isinf_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += isinf(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_isinf, double)->AT_COMMON_VALS; BENCHMARK_WITH_ARG(BM_math_isinf, double)->AT_COMMON_VALS;
void BM_math_isinf::Run(int iters, double value) { void BM_math_isinf::Run(int iters, double value) {
StartBenchmarkTiming(); StartBenchmarkTiming();
@ -122,60 +78,6 @@ void BM_math_isinf::Run(int iters, double value) {
StopBenchmarkTiming(); StopBenchmarkTiming();
} }
BENCHMARK_WITH_ARG(BM_math_isnan_macro, double)->AT_COMMON_VALS;
void BM_math_isnan_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += isnan(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_isnan, double)->AT_COMMON_VALS;
void BM_math_isnan::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += (isnan)(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_isnormal_macro, double)->AT_COMMON_VALS;
void BM_math_isnormal_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += isnormal(v);
}
StopBenchmarkTiming();
}
#if defined(__BIONIC__)
BENCHMARK_WITH_ARG(BM_math_isnormal, double)->AT_COMMON_VALS;
void BM_math_isnormal::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += (__isnormal)(v);
}
StopBenchmarkTiming();
}
#endif
BENCHMARK_NO_ARG(BM_math_sin_fast); BENCHMARK_NO_ARG(BM_math_sin_fast);
void BM_math_sin_fast::Run(int iters) { void BM_math_sin_fast::Run(int iters) {
StartBenchmarkTiming(); StartBenchmarkTiming();
@ -232,55 +134,3 @@ void BM_math_fpclassify::Run(int iters, double value) {
StopBenchmarkTiming(); StopBenchmarkTiming();
} }
BENCHMARK_WITH_ARG(BM_math_signbit_macro, double)->AT_COMMON_VALS;
void BM_math_signbit_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += signbit(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_signbit, double)->AT_COMMON_VALS;
void BM_math_signbit::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += (__signbit)(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_fabs_macro, double)->AT_COMMON_VALS;
void BM_math_fabs_macro::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += fabs(v);
}
StopBenchmarkTiming();
}
BENCHMARK_WITH_ARG(BM_math_fabs, double)->AT_COMMON_VALS;
void BM_math_fabs::Run(int iters, double value) {
StartBenchmarkTiming();
d = 0.0;
v = value;
for (int i = 0; i < iters; ++i) {
d += (fabs)(v);
}
StopBenchmarkTiming();
}

View File

@ -16,7 +16,6 @@
#include <stdio.h> #include <stdio.h>
#include <stdio_ext.h> #include <stdio_ext.h>
#include <stdlib.h>
#include <benchmark/Benchmark.h> #include <benchmark/Benchmark.h>
@ -74,7 +73,7 @@ static void FopenFgetsFclose(int iters, bool no_locking) {
for (int i = 0; i < iters; ++i) { for (int i = 0; i < iters; ++i) {
FILE* fp = fopen("/proc/version", "re"); FILE* fp = fopen("/proc/version", "re");
if (no_locking) __fsetlocking(fp, FSETLOCKING_BYCALLER); if (no_locking) __fsetlocking(fp, FSETLOCKING_BYCALLER);
if (fgets(buf, sizeof(buf), fp) == nullptr) abort(); fgets(buf, sizeof(buf), fp);
fclose(fp); fclose(fp);
} }
} }

View File

@ -17,7 +17,6 @@
#include <sys/syscall.h> #include <sys/syscall.h>
#include <sys/time.h> #include <sys/time.h>
#include <time.h> #include <time.h>
#include <unistd.h>
#include <benchmark/Benchmark.h> #include <benchmark/Benchmark.h>

View File

@ -14,8 +14,6 @@
* limitations under the License. * limitations under the License.
*/ */
#include "utils.h"
#include <inttypes.h> #include <inttypes.h>
#include <stdio.h> #include <stdio.h>
#include <stdint.h> #include <stdint.h>
@ -23,7 +21,7 @@
#include <string> #include <string>
#include <android-base/stringprintf.h> #include "utils.h"
int Round(int n) { int Round(int n) {
int base = 1; int base = 1;
@ -74,7 +72,10 @@ std::string PrettyInt(long value, size_t base) {
break; break;
} }
} }
return android::base::StringPrintf("%s%" PRId64 "%s", char* s = NULL;
negative_number ? "-" : "", asprintf(&s, "%s%" PRId64 "%s", (negative_number ? "-" : ""),
count / kAmountPerUnit[i], kUnitStrings[i]); count / kAmountPerUnit[i], kUnitStrings[i]);
std::string result(s);
free(s);
return result;
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
LOCAL_PATH := $(call my-dir) LOCAL_PATH := $(call my-dir)
bionic_coverage ?= false bionic_coverage := false
# Make everything depend on any changes to included makefiles. # Make everything depend on any changes to included makefiles.
libc_common_additional_dependencies := $(LOCAL_PATH)/Android.mk libc_common_additional_dependencies := $(LOCAL_PATH)/Android.mk
@ -47,6 +47,7 @@ libc_common_src_files := \
bionic/if_indextoname.c \ bionic/if_indextoname.c \
bionic/if_nametoindex.c \ bionic/if_nametoindex.c \
bionic/initgroups.c \ bionic/initgroups.c \
bionic/ioctl.c \
bionic/isatty.c \ bionic/isatty.c \
bionic/memmem.c \ bionic/memmem.c \
bionic/pututline.c \ bionic/pututline.c \
@ -58,7 +59,6 @@ libc_common_src_files := \
bionic/system_properties_compat.c \ bionic/system_properties_compat.c \
stdio/findfp.c \ stdio/findfp.c \
stdio/fread.c \ stdio/fread.c \
stdio/refill.c \
stdio/snprintf.c\ stdio/snprintf.c\
stdio/sprintf.c \ stdio/sprintf.c \
stdio/stdio.c \ stdio/stdio.c \
@ -70,17 +70,12 @@ libc_common_src_files := \
libc_common_src_files += \ libc_common_src_files += \
bionic/__FD_chk.cpp \ bionic/__FD_chk.cpp \
bionic/__fgets_chk.cpp \ bionic/__fgets_chk.cpp \
bionic/__fread_chk.cpp \
bionic/__fwrite_chk.cpp \
bionic/__getcwd_chk.cpp \
bionic/__memchr_chk.cpp \ bionic/__memchr_chk.cpp \
bionic/__memmove_chk.cpp \ bionic/__memmove_chk.cpp \
bionic/__memrchr_chk.cpp \ bionic/__memrchr_chk.cpp \
bionic/__poll_chk.cpp \ bionic/__poll_chk.cpp \
bionic/__pread64_chk.cpp \ bionic/__pread64_chk.cpp \
bionic/__pread_chk.cpp \ bionic/__pread_chk.cpp \
bionic/__pwrite64_chk.cpp \
bionic/__pwrite_chk.cpp \
bionic/__read_chk.cpp \ bionic/__read_chk.cpp \
bionic/__readlink_chk.cpp \ bionic/__readlink_chk.cpp \
bionic/__readlinkat_chk.cpp \ bionic/__readlinkat_chk.cpp \
@ -97,14 +92,12 @@ libc_common_src_files += \
bionic/__umask_chk.cpp \ bionic/__umask_chk.cpp \
bionic/__vsnprintf_chk.cpp \ bionic/__vsnprintf_chk.cpp \
bionic/__vsprintf_chk.cpp \ bionic/__vsprintf_chk.cpp \
bionic/__write_chk.cpp
libc_bionic_ndk_src_files := \ libc_bionic_ndk_src_files := \
bionic/abort.cpp \ bionic/abort.cpp \
bionic/accept.cpp \ bionic/accept.cpp \
bionic/accept4.cpp \ bionic/accept4.cpp \
bionic/access.cpp \ bionic/access.cpp \
bionic/arpa_inet.cpp \
bionic/assert.cpp \ bionic/assert.cpp \
bionic/atof.cpp \ bionic/atof.cpp \
bionic/bionic_systrace.cpp \ bionic/bionic_systrace.cpp \
@ -136,12 +129,8 @@ libc_bionic_ndk_src_files := \
bionic/fchmod.cpp \ bionic/fchmod.cpp \
bionic/fchmodat.cpp \ bionic/fchmodat.cpp \
bionic/ffs.cpp \ bionic/ffs.cpp \
bionic/fgetxattr.cpp \
bionic/flistxattr.cpp \
bionic/flockfile.cpp \ bionic/flockfile.cpp \
bionic/fpclassify.cpp \ bionic/fpclassify.cpp \
bionic/fsetxattr.cpp \
bionic/ftruncate.cpp \
bionic/futimens.cpp \ bionic/futimens.cpp \
bionic/getcwd.cpp \ bionic/getcwd.cpp \
bionic/gethostname.cpp \ bionic/gethostname.cpp \
@ -149,9 +138,7 @@ libc_bionic_ndk_src_files := \
bionic/getpid.cpp \ bionic/getpid.cpp \
bionic/gettid.cpp \ bionic/gettid.cpp \
bionic/__gnu_basename.cpp \ bionic/__gnu_basename.cpp \
bionic/ifaddrs.cpp \
bionic/inotify_init.cpp \ bionic/inotify_init.cpp \
bionic/ioctl.cpp \
bionic/lchown.cpp \ bionic/lchown.cpp \
bionic/lfs64_support.cpp \ bionic/lfs64_support.cpp \
bionic/__libc_current_sigrtmax.cpp \ bionic/__libc_current_sigrtmax.cpp \
@ -171,7 +158,6 @@ libc_bionic_ndk_src_files := \
bionic/mkfifo.cpp \ bionic/mkfifo.cpp \
bionic/mknod.cpp \ bionic/mknod.cpp \
bionic/mntent.cpp \ bionic/mntent.cpp \
bionic/mremap.cpp \
bionic/NetdClientDispatch.cpp \ bionic/NetdClientDispatch.cpp \
bionic/open.cpp \ bionic/open.cpp \
bionic/pathconf.cpp \ bionic/pathconf.cpp \
@ -218,7 +204,6 @@ libc_bionic_ndk_src_files := \
bionic/socket.cpp \ bionic/socket.cpp \
bionic/stat.cpp \ bionic/stat.cpp \
bionic/statvfs.cpp \ bionic/statvfs.cpp \
bionic/strchrnul.cpp \
bionic/strerror.cpp \ bionic/strerror.cpp \
bionic/strerror_r.cpp \ bionic/strerror_r.cpp \
bionic/strsignal.cpp \ bionic/strsignal.cpp \
@ -244,11 +229,9 @@ libc_bionic_ndk_src_files := \
libc_bionic_src_files := libc_bionic_src_files :=
# The following implementations depend on pthread data, so we can't include # The fork implementation depends on pthread data, so we can't include it in
# them in libc_ndk.a. # libc_ndk.a.
libc_bionic_src_files += \ libc_bionic_src_files += bionic/fork.cpp
bionic/__cxa_thread_atexit_impl.cpp \
bionic/fork.cpp \
# The data that backs getauxval is initialized in the libc init functions which # The data that backs getauxval is initialized in the libc init functions which
# are invoked by the linker. If this file is included in libc_ndk.a, only one of # are invoked by the linker. If this file is included in libc_ndk.a, only one of
@ -256,20 +239,10 @@ libc_bionic_src_files += \
# dereferences. # dereferences.
libc_bionic_src_files += bionic/getauxval.cpp libc_bionic_src_files += bionic/getauxval.cpp
# These four require getauxval, which isn't available on older platforms. # These three require getauxval, which isn't available on older platforms.
libc_bionic_src_files += bionic/getentropy_linux.c libc_bionic_src_files += bionic/getentropy_linux.c
libc_bionic_src_files += bionic/sysconf.cpp libc_bionic_src_files += bionic/sysconf.cpp
libc_bionic_src_files += bionic/vdso.cpp libc_bionic_src_files += bionic/vdso.cpp
libc_bionic_src_files += bionic/setjmp_cookie.cpp
libc_bionic_src_files += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_cxa_src_files := \ libc_cxa_src_files := \
bionic/__cxa_guard.cpp \ bionic/__cxa_guard.cpp \
@ -280,33 +253,30 @@ libc_upstream_freebsd_src_files := \
upstream-freebsd/lib/libc/gen/ldexp.c \ upstream-freebsd/lib/libc/gen/ldexp.c \
upstream-freebsd/lib/libc/gen/sleep.c \ upstream-freebsd/lib/libc/gen/sleep.c \
upstream-freebsd/lib/libc/gen/usleep.c \ upstream-freebsd/lib/libc/gen/usleep.c \
upstream-freebsd/lib/libc/stdlib/abs.c \
upstream-freebsd/lib/libc/stdlib/getopt_long.c \ upstream-freebsd/lib/libc/stdlib/getopt_long.c \
upstream-freebsd/lib/libc/stdlib/imaxabs.c \
upstream-freebsd/lib/libc/stdlib/imaxdiv.c \
upstream-freebsd/lib/libc/stdlib/labs.c \
upstream-freebsd/lib/libc/stdlib/llabs.c \
upstream-freebsd/lib/libc/stdlib/qsort.c \ upstream-freebsd/lib/libc/stdlib/qsort.c \
upstream-freebsd/lib/libc/stdlib/quick_exit.c \ upstream-freebsd/lib/libc/stdlib/quick_exit.c \
upstream-freebsd/lib/libc/stdlib/realpath.c \ upstream-freebsd/lib/libc/stdlib/realpath.c \
upstream-freebsd/lib/libc/string/wcpcpy.c \ upstream-freebsd/lib/libc/string/wcpcpy.c \
upstream-freebsd/lib/libc/string/wcpncpy.c \ upstream-freebsd/lib/libc/string/wcpncpy.c \
upstream-freebsd/lib/libc/string/wcscasecmp.c \ upstream-freebsd/lib/libc/string/wcscasecmp.c \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wcscspn.c \ upstream-freebsd/lib/libc/string/wcscspn.c \
upstream-freebsd/lib/libc/string/wcsdup.c \ upstream-freebsd/lib/libc/string/wcsdup.c \
upstream-freebsd/lib/libc/string/wcslcat.c \ upstream-freebsd/lib/libc/string/wcslcat.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcsncasecmp.c \ upstream-freebsd/lib/libc/string/wcsncasecmp.c \
upstream-freebsd/lib/libc/string/wcsncat.c \ upstream-freebsd/lib/libc/string/wcsncat.c \
upstream-freebsd/lib/libc/string/wcsncmp.c \ upstream-freebsd/lib/libc/string/wcsncmp.c \
upstream-freebsd/lib/libc/string/wcsncpy.c \ upstream-freebsd/lib/libc/string/wcsncpy.c \
upstream-freebsd/lib/libc/string/wcsnlen.c \ upstream-freebsd/lib/libc/string/wcsnlen.c \
upstream-freebsd/lib/libc/string/wcspbrk.c \ upstream-freebsd/lib/libc/string/wcspbrk.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wcsspn.c \ upstream-freebsd/lib/libc/string/wcsspn.c \
upstream-freebsd/lib/libc/string/wcstok.c \ upstream-freebsd/lib/libc/string/wcstok.c \
upstream-freebsd/lib/libc/string/wmemchr.c \ upstream-freebsd/lib/libc/string/wmemchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
upstream-freebsd/lib/libc/string/wmemset.c \ upstream-freebsd/lib/libc/string/wmemset.c \
libc_upstream_netbsd_src_files := \ libc_upstream_netbsd_src_files := \
@ -336,7 +306,6 @@ libc_upstream_netbsd_src_files := \
upstream-netbsd/lib/libc/stdlib/nrand48.c \ upstream-netbsd/lib/libc/stdlib/nrand48.c \
upstream-netbsd/lib/libc/stdlib/_rand48.c \ upstream-netbsd/lib/libc/stdlib/_rand48.c \
upstream-netbsd/lib/libc/stdlib/rand_r.c \ upstream-netbsd/lib/libc/stdlib/rand_r.c \
upstream-netbsd/lib/libc/stdlib/reallocarr.c \
upstream-netbsd/lib/libc/stdlib/seed48.c \ upstream-netbsd/lib/libc/stdlib/seed48.c \
upstream-netbsd/lib/libc/stdlib/srand48.c \ upstream-netbsd/lib/libc/stdlib/srand48.c \
upstream-netbsd/lib/libc/string/memccpy.c \ upstream-netbsd/lib/libc/string/memccpy.c \
@ -371,25 +340,11 @@ libc_upstream_openbsd_gdtoa_src_files_64 := \
$(libc_upstream_openbsd_gdtoa_src_files) \ $(libc_upstream_openbsd_gdtoa_src_files) \
upstream-openbsd/lib/libc/gdtoa/strtorQ.c \ upstream-openbsd/lib/libc/gdtoa/strtorQ.c \
# These two depend on getentropy_linux.c, which isn't in libc_ndk.a. # These two depend on getentropy_linux.cpp, which isn't in libc_ndk.a.
libc_upstream_openbsd_src_files := \ libc_upstream_openbsd_src_files := \
upstream-openbsd/lib/libc/crypt/arc4random.c \ upstream-openbsd/lib/libc/crypt/arc4random.c \
upstream-openbsd/lib/libc/crypt/arc4random_uniform.c \ upstream-openbsd/lib/libc/crypt/arc4random_uniform.c \
libc_upstream_openbsd_src_files += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strcpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
upstream-openbsd/lib/libc/string/strncat.c \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
libc_upstream_openbsd_ndk_src_files := \ libc_upstream_openbsd_ndk_src_files := \
upstream-openbsd/lib/libc/compat-43/killpg.c \ upstream-openbsd/lib/libc/compat-43/killpg.c \
upstream-openbsd/lib/libc/gen/alarm.c \ upstream-openbsd/lib/libc/gen/alarm.c \
@ -432,9 +387,11 @@ libc_upstream_openbsd_ndk_src_files := \
upstream-openbsd/lib/libc/locale/wctomb.c \ upstream-openbsd/lib/libc/locale/wctomb.c \
upstream-openbsd/lib/libc/net/htonl.c \ upstream-openbsd/lib/libc/net/htonl.c \
upstream-openbsd/lib/libc/net/htons.c \ upstream-openbsd/lib/libc/net/htons.c \
upstream-openbsd/lib/libc/net/inet_addr.c \
upstream-openbsd/lib/libc/net/inet_lnaof.c \ upstream-openbsd/lib/libc/net/inet_lnaof.c \
upstream-openbsd/lib/libc/net/inet_makeaddr.c \ upstream-openbsd/lib/libc/net/inet_makeaddr.c \
upstream-openbsd/lib/libc/net/inet_netof.c \ upstream-openbsd/lib/libc/net/inet_netof.c \
upstream-openbsd/lib/libc/net/inet_network.c \
upstream-openbsd/lib/libc/net/inet_ntoa.c \ upstream-openbsd/lib/libc/net/inet_ntoa.c \
upstream-openbsd/lib/libc/net/inet_ntop.c \ upstream-openbsd/lib/libc/net/inet_ntop.c \
upstream-openbsd/lib/libc/net/inet_pton.c \ upstream-openbsd/lib/libc/net/inet_pton.c \
@ -494,6 +451,7 @@ libc_upstream_openbsd_ndk_src_files := \
upstream-openbsd/lib/libc/stdio/puts.c \ upstream-openbsd/lib/libc/stdio/puts.c \
upstream-openbsd/lib/libc/stdio/putwc.c \ upstream-openbsd/lib/libc/stdio/putwc.c \
upstream-openbsd/lib/libc/stdio/putwchar.c \ upstream-openbsd/lib/libc/stdio/putwchar.c \
upstream-openbsd/lib/libc/stdio/refill.c \
upstream-openbsd/lib/libc/stdio/remove.c \ upstream-openbsd/lib/libc/stdio/remove.c \
upstream-openbsd/lib/libc/stdio/rewind.c \ upstream-openbsd/lib/libc/stdio/rewind.c \
upstream-openbsd/lib/libc/stdio/rget.c \ upstream-openbsd/lib/libc/stdio/rget.c \
@ -527,16 +485,11 @@ libc_upstream_openbsd_ndk_src_files := \
upstream-openbsd/lib/libc/stdio/wprintf.c \ upstream-openbsd/lib/libc/stdio/wprintf.c \
upstream-openbsd/lib/libc/stdio/wscanf.c \ upstream-openbsd/lib/libc/stdio/wscanf.c \
upstream-openbsd/lib/libc/stdio/wsetup.c \ upstream-openbsd/lib/libc/stdio/wsetup.c \
upstream-openbsd/lib/libc/stdlib/abs.c \
upstream-openbsd/lib/libc/stdlib/atoi.c \ upstream-openbsd/lib/libc/stdlib/atoi.c \
upstream-openbsd/lib/libc/stdlib/atol.c \ upstream-openbsd/lib/libc/stdlib/atol.c \
upstream-openbsd/lib/libc/stdlib/atoll.c \ upstream-openbsd/lib/libc/stdlib/atoll.c \
upstream-openbsd/lib/libc/stdlib/getenv.c \ upstream-openbsd/lib/libc/stdlib/getenv.c \
upstream-openbsd/lib/libc/stdlib/insque.c \ upstream-openbsd/lib/libc/stdlib/insque.c \
upstream-openbsd/lib/libc/stdlib/imaxabs.c \
upstream-openbsd/lib/libc/stdlib/imaxdiv.c \
upstream-openbsd/lib/libc/stdlib/labs.c \
upstream-openbsd/lib/libc/stdlib/llabs.c \
upstream-openbsd/lib/libc/stdlib/lsearch.c \ upstream-openbsd/lib/libc/stdlib/lsearch.c \
upstream-openbsd/lib/libc/stdlib/reallocarray.c \ upstream-openbsd/lib/libc/stdlib/reallocarray.c \
upstream-openbsd/lib/libc/stdlib/remque.c \ upstream-openbsd/lib/libc/stdlib/remque.c \
@ -567,7 +520,6 @@ libc_upstream_openbsd_ndk_src_files := \
libc_pthread_src_files := \ libc_pthread_src_files := \
bionic/pthread_atfork.cpp \ bionic/pthread_atfork.cpp \
bionic/pthread_attr.cpp \ bionic/pthread_attr.cpp \
bionic/pthread_barrier.cpp \
bionic/pthread_cond.cpp \ bionic/pthread_cond.cpp \
bionic/pthread_create.cpp \ bionic/pthread_create.cpp \
bionic/pthread_detach.cpp \ bionic/pthread_detach.cpp \
@ -587,21 +539,21 @@ libc_pthread_src_files := \
bionic/pthread_setname_np.cpp \ bionic/pthread_setname_np.cpp \
bionic/pthread_setschedparam.cpp \ bionic/pthread_setschedparam.cpp \
bionic/pthread_sigmask.cpp \ bionic/pthread_sigmask.cpp \
bionic/pthread_spinlock.cpp \
libc_thread_atexit_impl_src_files := \
bionic/__cxa_thread_atexit_impl.cpp \
libc_arch_static_src_files := \ libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.cpp \ bionic/dl_iterate_phdr_static.cpp \
# Various kinds of cruft. # Various kinds of LP32 cruft.
# ======================================================== # ========================================================
libc_common_src_files += \ libc_bionic_src_files_32 += \
bionic/ndk_cruft.cpp \
libc_bionic_ndk_src_files_32 += \
bionic/mmap.cpp \ bionic/mmap.cpp \
libc_common_src_files_32 += \ libc_common_src_files_32 += \
bionic/legacy_32_bit_support.cpp \ bionic/legacy_32_bit_support.cpp \
bionic/ndk_cruft.cpp \
bionic/time64.c \ bionic/time64.c \
libc_netbsd_src_files_32 += \ libc_netbsd_src_files_32 += \
@ -617,19 +569,11 @@ libc_common_cflags := \
-D_LIBC=1 \ -D_LIBC=1 \
-Wall -Wextra -Wunused \ -Wall -Wextra -Wunused \
ifneq ($(TARGET_USES_LOGD),false)
libc_common_cflags += -DTARGET_USES_LOGD
endif
use_clang := $(USE_CLANG_PLATFORM_BUILD) use_clang := $(USE_CLANG_PLATFORM_BUILD)
# Clang/llvm has incompatible long double (fp128) for x86_64.
# https://llvm.org/bugs/show_bug.cgi?id=23897
ifeq ($(TARGET_ARCH),x86_64)
use_clang := false
endif
# b/25291096, Clang/llvm compiled libc.so for mips/mips64 failed to boot.
ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),mips mips64))
use_clang := false
endif
ifeq ($(use_clang),) ifeq ($(use_clang),)
use_clang := false use_clang := false
endif endif
@ -645,7 +589,7 @@ ifeq ($(strip $(DEBUG_BIONIC_LIBC)),true)
libc_common_cflags += -DDEBUG libc_common_cflags += -DDEBUG
endif endif
ifeq ($(MALLOC_SVELTE),true) ifeq ($(MALLOC_IMPL),dlmalloc)
libc_common_cflags += -DUSE_DLMALLOC libc_common_cflags += -DUSE_DLMALLOC
libc_malloc_src := bionic/dlmalloc.c libc_malloc_src := bionic/dlmalloc.c
else else
@ -654,12 +598,20 @@ else
libc_common_c_includes += external/jemalloc/include libc_common_c_includes += external/jemalloc/include
endif endif
# To customize dlmalloc's alignment, set BOARD_MALLOC_ALIGNMENT in
# the appropriate BoardConfig.mk file.
#
ifneq ($(BOARD_MALLOC_ALIGNMENT),)
libc_common_cflags += -DMALLOC_ALIGNMENT=$(BOARD_MALLOC_ALIGNMENT)
endif
# Define some common conlyflags # Define some common conlyflags
libc_common_conlyflags := \ libc_common_conlyflags := \
-std=gnu99 -std=gnu99
# Define some common cppflags # Define some common cppflags
libc_common_cppflags := \ libc_common_cppflags := \
-std=gnu++11
# Define some common includes # Define some common includes
# ======================================================== # ========================================================
@ -684,21 +636,13 @@ endef
# libc_stack_protector.a - stack protector code # libc_stack_protector.a - stack protector code
# ======================================================== # ========================================================
# #
# Code that implements the stack protector (or that runs # The stack protector code needs to be compiled
# before TLS has been set up) needs to be compiled with # with -fno-stack-protector, since it modifies the
# -fno-stack-protector, since it accesses the stack canary # stack canary.
# TLS slot.
include $(CLEAR_VARS) include $(CLEAR_VARS)
LOCAL_SRC_FILES := \ LOCAL_SRC_FILES := bionic/__stack_chk_fail.cpp
bionic/__libc_init_main_thread.cpp \
bionic/__stack_chk_fail.cpp \
LOCAL_SRC_FILES_arm64 := arch-arm64/bionic/__set_tls.c
LOCAL_SRC_FILES_x86 := arch-x86/bionic/__set_tls.c
LOCAL_SRC_FILES_x86_64 := arch-x86_64/bionic/__set_tls.c
LOCAL_CFLAGS := $(libc_common_cflags) -fno-stack-protector LOCAL_CFLAGS := $(libc_common_cflags) -fno-stack-protector
LOCAL_CONLYFLAGS := $(libc_common_conlyflags) LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) LOCAL_CPPFLAGS := $(libc_common_cppflags)
@ -708,31 +652,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
include $(BUILD_STATIC_LIBRARY)
# libc_init_static.cpp also needs to be built without stack protector,
# because it's responsible for setting up TLS for static executables.
# This isn't the case for dynamic executables because the dynamic linker
# has already set up the main thread's TLS.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := bionic/libc_init_static.cpp
LOCAL_CFLAGS := $(libc_common_cflags) -fno-stack-protector
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_init_static
LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -751,22 +671,17 @@ LOCAL_SRC_FILES += upstream-openbsd/lib/libc/time/wcsftime.c
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
-fvisibility=hidden \ -fvisibility=hidden \
-Wno-unused-parameter \
# Don't use ridiculous amounts of stack. # Don't use ridiculous amounts of stack.
LOCAL_CFLAGS += -DALL_STATE LOCAL_CFLAGS += -DALL_STATE
# Include tzsetwall, timelocal, timegm, time2posix, and posix2time. # Include tzsetwall, timelocal, timegm, time2posix, and posix2time.
LOCAL_CFLAGS += -DSTD_INSPIRED LOCAL_CFLAGS += -DSTD_INSPIRED
# Obviously, we want to be thread-safe.
LOCAL_CFLAGS += -DTHREAD_SAFE
# The name of the tm_gmtoff field in our struct tm. # The name of the tm_gmtoff field in our struct tm.
LOCAL_CFLAGS += -DTM_GMTOFF=tm_gmtoff LOCAL_CFLAGS += -DTM_GMTOFF=tm_gmtoff
# Where we store our tzdata. # Where we store our tzdata.
LOCAL_CFLAGS += -DTZDIR=\"/system/usr/share/zoneinfo\" LOCAL_CFLAGS += -DTZDIR=\"/system/usr/share/zoneinfo\"
# Include timezone and daylight globals. # Include timezone and daylight globals.
LOCAL_CFLAGS += -DUSG_COMPAT=1 LOCAL_CFLAGS += -DUSG_COMPAT=1
# Use the empty string (instead of " ") as the timezone abbreviation fallback.
LOCAL_CFLAGS += -DWILDABBR=\"\"
LOCAL_CFLAGS += -DNO_RUN_TIME_WARNINGS_ABOUT_YEAR_2000_PROBLEMS_THANK_YOU LOCAL_CFLAGS += -DNO_RUN_TIME_WARNINGS_ABOUT_YEAR_2000_PROBLEMS_THANK_YOU
LOCAL_CFLAGS += -Dlint LOCAL_CFLAGS += -Dlint
@ -778,7 +693,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -824,7 +739,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -857,11 +772,11 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES_EXCLUDE,libc_freebsd_src_files_exclude)) $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_freebsd_src_files))
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -892,7 +807,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -933,7 +848,7 @@ LOCAL_MODULE := libc_openbsd_ndk
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -971,12 +886,11 @@ LOCAL_MODULE := libc_openbsd
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_openbsd_src_files)) $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_openbsd_src_files))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES_EXCLUDE,libc_openbsd_src_files_exclude))
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1010,7 +924,7 @@ LOCAL_MODULE := libc_gdtoa
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1027,6 +941,10 @@ LOCAL_SRC_FILES := $(libc_bionic_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \ -Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags) LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
@ -1035,12 +953,10 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_src_files))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES_EXCLUDE,libc_bionic_src_files_exclude))
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1056,6 +972,10 @@ LOCAL_SRC_FILES := $(libc_bionic_ndk_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \ -Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags) LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
@ -1064,13 +984,31 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_ndk_src_files)) $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_src_files))
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_thread_atexit_impl_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) -Wframe-larger-than=2048
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_thread_atexit_impl
# TODO: Clang tries to use __tls_get_addr which is not supported yet
# remove after it is implemented.
LOCAL_CLANG := false
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
# ======================================================== # ========================================================
# libc_pthread.a - pthreads parts that previously lived in # libc_pthread.a - pthreads parts that previously lived in
@ -1086,6 +1024,10 @@ LOCAL_SRC_FILES := $(libc_pthread_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \ -Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags) LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) LOCAL_C_INCLUDES := $(libc_common_c_includes)
@ -1094,7 +1036,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1118,7 +1060,7 @@ LOCAL_CLANG := true # GCC refuses to hide new/delete
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
# b/17574078: Need to disable coverage until we have a prebuilt libprofile_rt. # b/17574078: Need to disable coverage until we have a prebuilt libprofile_rt.
# Since this is a static library built with clang, it needs to link # Since this is a static library built with clang, it needs to link
# libprofile_rt when it is linked into the final binary. Since the final binary # libprofile_rt when it is linked into the final binary. Since the final binary
@ -1146,7 +1088,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1168,7 +1110,7 @@ LOCAL_CFLAGS := $(libc_common_cflags) -fno-builtin
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1194,7 +1136,7 @@ LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CFLAGS := $(libc_common_cflags) -fvisibility=hidden -O0 LOCAL_CFLAGS := $(libc_common_cflags) -fvisibility=hidden -O0
LOCAL_CPPFLAGS := $(libc_common_cppflags) LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes) LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
@ -1228,7 +1170,7 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
ifneq ($(MALLOC_SVELTE),true) ifneq ($(MALLOC_IMPL),dlmalloc)
LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
endif endif
@ -1268,6 +1210,7 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
libc_pthread \ libc_pthread \
libc_stack_protector \ libc_stack_protector \
libc_syscalls \ libc_syscalls \
libc_thread_atexit_impl \
libc_tzcode \ libc_tzcode \
LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi
@ -1277,7 +1220,7 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
# TODO: split out the asflags. # TODO: split out the asflags.
LOCAL_ASFLAGS := $(LOCAL_CFLAGS) LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1300,6 +1243,7 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \ LOCAL_SRC_FILES := \
$(libc_arch_static_src_files) \ $(libc_arch_static_src_files) \
bionic/libc_init_static.cpp
LOCAL_C_INCLUDES := $(libc_common_c_includes) LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
@ -1311,10 +1255,10 @@ LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_MODULE := libc_nomalloc LOCAL_MODULE := libc_nomalloc
LOCAL_CLANG := $(use_clang) LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common libc_init_static LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1337,7 +1281,7 @@ LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_malloc LOCAL_MODULE := libc_malloc
LOCAL_CLANG := $(use_clang) LOCAL_CLANG := $(use_clang)
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)
@ -1351,6 +1295,7 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \ LOCAL_SRC_FILES := \
$(libc_arch_static_src_files) \ $(libc_arch_static_src_files) \
bionic/malloc_debug_common.cpp \ bionic/malloc_debug_common.cpp \
bionic/libc_init_static.cpp \
LOCAL_CFLAGS := $(libc_common_cflags) \ LOCAL_CFLAGS := $(libc_common_cflags) \
-DLIBC_STATIC \ -DLIBC_STATIC \
@ -1361,15 +1306,15 @@ LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc LOCAL_MODULE := libc
LOCAL_CLANG := $(use_clang) LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies) LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common libc_init_static LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
ifneq ($(MALLOC_SVELTE),true) ifneq ($(MALLOC_IMPL),dlmalloc)
LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
endif endif
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1401,15 +1346,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_REQUIRED_MODULES := tzdata LOCAL_REQUIRED_MODULES := tzdata
LOCAL_ADDITIONAL_DEPENDENCIES := \ LOCAL_ADDITIONAL_DEPENDENCIES := \
$(libc_common_additional_dependencies) \ $(libc_common_additional_dependencies) \
$(LOCAL_PATH)/libc.arm.map \ $(LOCAL_PATH)/version_script.txt \
$(LOCAL_PATH)/libc.arm64.map \
$(LOCAL_PATH)/libc.mips.map \
$(LOCAL_PATH)/libc.mips64.map \
$(LOCAL_PATH)/libc.x86.map \
$(LOCAL_PATH)/libc.x86_64.map \
$(LOCAL_PATH)/libc.arm.brillo.map \
$(LOCAL_PATH)/libc.mips.brillo.map \
$(LOCAL_PATH)/libc.x86.brillo.map \
# Leave the symbols in the shared library so that stack unwinders can produce # Leave the symbols in the shared library so that stack unwinders can produce
# meaningful name resolution. # meaningful name resolution.
@ -1429,40 +1366,24 @@ LOCAL_PACK_MODULE_RELOCATIONS := false
LOCAL_SHARED_LIBRARIES := libdl LOCAL_SHARED_LIBRARIES := libdl
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
ifneq ($(MALLOC_SVELTE),true) ifneq ($(MALLOC_IMPL),dlmalloc)
LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
endif endif
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := LOCAL_SYSTEM_SHARED_LIBRARIES :=
# TODO: This is to work around b/24465209. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=both
LOCAL_LDFLAGS_x86 := -Wl,--hash-style=both
# Don't re-export new/delete and friends, even if the compiler really wants to. # Don't re-export new/delete and friends, even if the compiler really wants to.
ifdef BRILLO LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
LOCAL_LDFLAGS_arm += -Wl,--version-script,$(LOCAL_PATH)/libc.arm.brillo.map
LOCAL_LDFLAGS_mips += -Wl,--version-script,$(LOCAL_PATH)/libc.mips.brillo.map
LOCAL_LDFLAGS_x86 += -Wl,--version-script,$(LOCAL_PATH)/libc.x86.brillo.map
else
LOCAL_LDFLAGS_arm += -Wl,--version-script,$(LOCAL_PATH)/libc.arm.map
LOCAL_LDFLAGS_mips += -Wl,--version-script,$(LOCAL_PATH)/libc.mips.map
LOCAL_LDFLAGS_x86 += -Wl,--version-script,$(LOCAL_PATH)/libc.x86.map
endif
LOCAL_LDFLAGS_arm64 += -Wl,--version-script,$(LOCAL_PATH)/libc.arm64.map
LOCAL_LDFLAGS_mips64 += -Wl,--version-script,$(LOCAL_PATH)/libc.mips64.map
LOCAL_LDFLAGS_x86_64 += -Wl,--version-script,$(LOCAL_PATH)/libc.x86_64.map
# We'd really like to do this for all architectures, but since this wasn't done # We'd really like to do this for all architectures, but since this wasn't done
# before, these symbols must continue to be exported on LP32 for binary # before, these symbols must continue to be exported on LP32 for binary
# compatibility. # compatibility.
LOCAL_LDFLAGS_64 := -Wl,--exclude-libs,libgcc.a # TODO: disabled for http://b/20065774.
#LOCAL_LDFLAGS_64 := -Wl,--exclude-libs,libgcc.a
# Unfortunately --exclude-libs clobbers our version script, so we have to # TODO: This is to work around b/19059885. Remove after root cause is fixed
# prevent the build system from using this flag. LOCAL_LDFLAGS_arm := -Wl,--hash-style=sysv
LOCAL_NO_EXCLUDE_LIBS := true
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files)) $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files))
@ -1475,7 +1396,7 @@ LOCAL_CFLAGS_arm += -DCRT_LEGACY_WORKAROUND
LOCAL_SRC_FILES_arm += \ LOCAL_SRC_FILES_arm += \
arch-arm/bionic/atexit_legacy.c arch-arm/bionic/atexit_legacy.c
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_SHARED_LIBRARY) include $(BUILD_SHARED_LIBRARY)
@ -1503,8 +1424,8 @@ LOCAL_C_INCLUDES := \
$(libc_common_c_includes) \ $(libc_common_c_includes) \
LOCAL_SRC_FILES := \ LOCAL_SRC_FILES := \
bionic/debug_backtrace.cpp \
bionic/debug_mapinfo.cpp \ bionic/debug_mapinfo.cpp \
bionic/debug_stacktrace.cpp \
bionic/libc_logging.cpp \ bionic/libc_logging.cpp \
bionic/malloc_debug_leak.cpp \ bionic/malloc_debug_leak.cpp \
bionic/malloc_debug_check.cpp \ bionic/malloc_debug_check.cpp \
@ -1521,20 +1442,15 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
# Only need this for arm since libc++ uses its own unwind code that # Only need this for arm since libc++ uses its own unwind code that
# doesn't mix with the other default unwind code. # doesn't mix with the other default unwind code.
LOCAL_STATIC_LIBRARIES_arm := libunwind_llvm LOCAL_STATIC_LIBRARIES_arm := libunwind_llvm
LOCAL_LDFLAGS_arm := -Wl,--exclude-libs,libunwind_llvm.a
LOCAL_STATIC_LIBRARIES += libc++abi LOCAL_STATIC_LIBRARIES += libc++abi
LOCAL_ALLOW_UNDEFINED_SYMBOLS := true LOCAL_ALLOW_UNDEFINED_SYMBOLS := true
# Don't re-export new/delete and friends, even if the compiler really wants to. # Don't re-export new/delete and friends, even if the compiler really wants to.
LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
# Unfortunately --exclude-libs clobbers our version script, so we have to
# prevent the build system from using this flag.
LOCAL_NO_EXCLUDE_LIBS := true
# Don't install on release build # Don't install on release build
LOCAL_MODULE_TAGS := eng debug LOCAL_MODULE_TAGS := eng debug
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1572,13 +1488,9 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
# Don't re-export new/delete and friends, even if the compiler really wants to. # Don't re-export new/delete and friends, even if the compiler really wants to.
LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
# Unfortunately --exclude-libs clobbers our version script, so we have to
# prevent the build system from using this flag.
LOCAL_NO_EXCLUDE_LIBS := true
# Don't install on release build # Don't install on release build
LOCAL_MODULE_TAGS := eng debug LOCAL_MODULE_TAGS := eng debug
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags)) $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@ -1600,16 +1512,15 @@ LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
LOCAL_CFLAGS := $(libc_common_cflags) LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) LOCAL_CPPFLAGS := $(libc_common_cppflags)
# TODO: This is to work around b/24465209. Remove after root cause is fixed # TODO: This is to work around b/19059885. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=both LOCAL_LDFLAGS_arm := -Wl,--hash-style=sysv
LOCAL_LDFLAGS_x86 := -Wl,--hash-style=both
LOCAL_SRC_FILES := $(libstdcxx_common_src_files) LOCAL_SRC_FILES := $(libstdcxx_common_src_files)
LOCAL_MODULE:= libstdc++ LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := libc LOCAL_SYSTEM_SHARED_LIBRARIES := libc
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_SHARED_LIBRARY) include $(BUILD_SHARED_LIBRARY)
@ -1625,7 +1536,7 @@ LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_CXX_STL := none LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := libc LOCAL_SYSTEM_SHARED_LIBRARIES := libc
LOCAL_SANITIZE := never LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage) LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY) include $(BUILD_STATIC_LIBRARY)

View File

@ -307,6 +307,50 @@ SUCH DAMAGE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (C) 2008-2010 The Android Open Source Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (C) 2009 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------
Copyright (C) 2009 The Android Open Source Project Copyright (C) 2009 The Android Open Source Project
All rights reserved. All rights reserved.
@ -410,6 +454,22 @@ Android adaptation and tweak by Jim Huang <jserv@0xlab.org>.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (C) 2011 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------
Copyright (C) 2011 The Android Open Source Project Copyright (C) 2011 The Android Open Source Project
All rights reserved. All rights reserved.
@ -628,50 +688,6 @@ SUCH DAMAGE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (C) 2015 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------
Copyright (C) 2015 The Android Open Source Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 1980, 1983, 1988, 1993 Copyright (c) 1980, 1983, 1988, 1993
The Regents of the University of California. All rights reserved. The Regents of the University of California. All rights reserved.
@ -2534,6 +2550,33 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (c) 1995, 1996 Carnegie-Mellon University.
All rights reserved.
Author: Chris G. Demetriou
Permission to use, copy, modify and distribute this software and
its documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the
software, derivative works or modified versions, and any portions
thereof, and that both notices appear in supporting documentation.
CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
Carnegie Mellon requests users of this software to return to
Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
School of Computer Science
Carnegie Mellon University
Pittsburgh PA 15213-3890
any improvements or extensions that they make and grant Carnegie the
rights to redistribute these changes.
-------------------------------------------------------------------
Copyright (c) 1996 by Internet Software Consortium. Copyright (c) 1996 by Internet Software Consortium.
Permission to use, copy, modify, and distribute this software for any Permission to use, copy, modify, and distribute this software for any
@ -3752,22 +3795,6 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (c) 2007 Todd C. Miller <Todd.Miller@courtesan.com>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-------------------------------------------------------------------
Copyright (c) 2007-2008 Michael G Schwern Copyright (c) 2007-2008 Michael G Schwern
This software originally derived from Paul Sheer's pivotal_gmtime_r.c. This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
@ -3905,6 +3932,35 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (c) 2009
MIPS Technologies, Inc., California.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the MIPS Technologies, Inc., nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2009 David Schultz <das@FreeBSD.org> Copyright (c) 2009 David Schultz <das@FreeBSD.org>
All rights reserved. All rights reserved.
@ -4386,64 +4442,6 @@ Copyright (c) 2012, Linaro Limited
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (c) 2012-2015
MIPS Technologies, Inc., California.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the MIPS Technologies, Inc., nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2013
MIPS Technologies, Inc., California.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the MIPS Technologies, Inc., nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2013 ARM Ltd Copyright (c) 2013 ARM Ltd
All rights reserved. All rights reserved.
@ -4517,35 +4515,6 @@ POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright (c) 2014
Imagination Technologies Limited.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the MIPS Technologies, Inc., nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY IMAGINATION TECHNOLOGIES LIMITED ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL IMAGINATION TECHNOLOGIES LIMITED BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org> Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
Copyright (c) 2014 Bob Beck <beck@obtuse.com> Copyright (c) 2014 Bob Beck <beck@obtuse.com>
@ -4764,6 +4733,31 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------- -------------------------------------------------------------------
Copyright 2000 David E. O'Brien, John D. Polstra.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright 2008 Android Open Source Project (source port randomization) Copyright 2008 Android Open Source Project (source port randomization)
Copyright (c) 1985, 1989, 1993 Copyright (c) 1985, 1989, 1993
The Regents of the University of California. All rights reserved. The Regents of the University of California. All rights reserved.

View File

@ -77,6 +77,7 @@ int prlimit64(pid_t, int, struct rlimit64*, const struct rlimit64*) arm,mips,x8
int setgroups:setgroups32(int, const gid_t*) arm,x86 int setgroups:setgroups32(int, const gid_t*) arm,x86
int setgroups:setgroups(int, const gid_t*) arm64,mips,mips64,x86_64 int setgroups:setgroups(int, const gid_t*) arm64,mips,mips64,x86_64
int setpgid(pid_t, pid_t) all int setpgid(pid_t, pid_t) all
pid_t vfork(void) arm
int setregid:setregid32(gid_t, gid_t) arm,x86 int setregid:setregid32(gid_t, gid_t) arm,x86
int setregid:setregid(gid_t, gid_t) arm64,mips,mips64,x86_64 int setregid:setregid(gid_t, gid_t) arm64,mips,mips64,x86_64
int chroot(const char*) all int chroot(const char*) all
@ -94,20 +95,10 @@ ssize_t pread64(int, void*, size_t, off64_t) arm,mips,x86
ssize_t pread64|pread(int, void*, size_t, off_t) arm64,mips64,x86_64 ssize_t pread64|pread(int, void*, size_t, off_t) arm64,mips64,x86_64
ssize_t pwrite64(int, void*, size_t, off64_t) arm,mips,x86 ssize_t pwrite64(int, void*, size_t, off64_t) arm,mips,x86
ssize_t pwrite64|pwrite(int, void*, size_t, off_t) arm64,mips64,x86_64 ssize_t pwrite64|pwrite(int, void*, size_t, off_t) arm64,mips64,x86_64
# On LP32, preadv/pwritev don't use off64_t --- they use pairs of 32-bit
# arguments to avoid problems on architectures like ARM where 64-bit arguments
# must be in a register pair starting with an even-numbered register.
# See linux/fs/read_write.c and https://lwn.net/Articles/311630/.
ssize_t __preadv64:preadv(int, const struct iovec*, int, long, long) arm,mips,x86
ssize_t preadv|preadv64(int, const struct iovec*, int, off_t) arm64,mips64,x86_64
ssize_t __pwritev64:pwritev(int, const struct iovec*, int, long, long) arm,mips,x86
ssize_t pwritev|pwritev64(int, const struct iovec*, int, off_t) arm64,mips64,x86_64
int ___close:close(int) all int ___close:close(int) all
pid_t __getpid:getpid() all pid_t __getpid:getpid() all
int munmap(void*, size_t) all int munmap(void*, size_t) all
void* ___mremap:mremap(void*, size_t, size_t, int, void*) all void* mremap(void*, size_t, size_t, unsigned long) all
int msync(const void*, size_t, int) all int msync(const void*, size_t, int) all
int mprotect(const void*, size_t, int) all int mprotect(const void*, size_t, int) all
int madvise(void*, size_t, int) all int madvise(void*, size_t, int) all
@ -131,9 +122,9 @@ int fdatasync(int) all
int fchown:fchown32(int, uid_t, gid_t) arm,x86 int fchown:fchown32(int, uid_t, gid_t) arm,x86
int fchown:fchown(int, uid_t, gid_t) arm64,mips,mips64,x86_64 int fchown:fchown(int, uid_t, gid_t) arm64,mips,mips64,x86_64
void sync(void) all void sync(void) all
int ___fsetxattr:fsetxattr(int, const char*, const void*, size_t, int) all int fsetxattr(int, const char*, const void*, size_t, int) all
ssize_t ___fgetxattr:fgetxattr(int, const char*, void*, size_t) all ssize_t fgetxattr(int, const char*, void*, size_t) all
ssize_t ___flistxattr:flistxattr(int, char*, size_t) all ssize_t flistxattr(int, char*, size_t) all
int fremovexattr(int, const char*) all int fremovexattr(int, const char*) all
int __getdents64:getdents64(unsigned int, struct dirent*, unsigned int) arm,arm64,mips,mips64,x86,x86_64 int __getdents64:getdents64(unsigned int, struct dirent*, unsigned int) arm,arm64,mips,mips64,x86,x86_64
@ -160,6 +151,7 @@ int utimensat(int, const char*, const struct timespec times[2], int) all
off_t lseek(int, off_t, int) arm,mips,x86 off_t lseek(int, off_t, int) arm,mips,x86
int __llseek:_llseek(int, unsigned long, unsigned long, off64_t*, int) arm,mips,x86 int __llseek:_llseek(int, unsigned long, unsigned long, off64_t*, int) arm,mips,x86
off_t lseek|lseek64(int, off_t, int) arm64,mips64,x86_64 off_t lseek|lseek64(int, off_t, int) arm64,mips64,x86_64
int ftruncate(int, off_t) arm,mips,x86
int ftruncate64(int, off64_t) arm,mips,x86 int ftruncate64(int, off64_t) arm,mips,x86
int ftruncate|ftruncate64(int, off_t) arm64,mips64,x86_64 int ftruncate|ftruncate64(int, off_t) arm64,mips64,x86_64
ssize_t sendfile(int out_fd, int in_fd, off_t* offset, size_t count) arm,mips,x86 ssize_t sendfile(int out_fd, int in_fd, off_t* offset, size_t count) arm,mips,x86
@ -212,7 +204,7 @@ clock_t times(struct tms*) all
int nanosleep(const struct timespec*, struct timespec*) all int nanosleep(const struct timespec*, struct timespec*) all
int clock_settime(clockid_t, const struct timespec*) all int clock_settime(clockid_t, const struct timespec*) all
int clock_getres(clockid_t, struct timespec*) all int clock_getres(clockid_t, struct timespec*) all
int ___clock_nanosleep:clock_nanosleep(clockid_t, int, const struct timespec*, struct timespec*) all int __clock_nanosleep:clock_nanosleep(clockid_t, int, const struct timespec*, struct timespec*) all
int getitimer(int, const struct itimerval*) all int getitimer(int, const struct itimerval*) all
int setitimer(int, const struct itimerval*, struct itimerval*) all int setitimer(int, const struct itimerval*, struct itimerval*) all
int __timer_create:timer_create(clockid_t clockid, struct sigevent* evp, __kernel_timer_t* timerid) all int __timer_create:timer_create(clockid_t clockid, struct sigevent* evp, __kernel_timer_t* timerid) all
@ -231,7 +223,7 @@ int __rt_sigpending:rt_sigpending(sigset_t*, size_t) all
int __rt_sigprocmask:rt_sigprocmask(int, const sigset_t*, sigset_t*, size_t) all int __rt_sigprocmask:rt_sigprocmask(int, const sigset_t*, sigset_t*, size_t) all
int __rt_sigsuspend:rt_sigsuspend(const sigset_t*, size_t) all int __rt_sigsuspend:rt_sigsuspend(const sigset_t*, size_t) all
int __rt_sigtimedwait:rt_sigtimedwait(const sigset_t*, struct siginfo_t*, struct timespec_t*, size_t) all int __rt_sigtimedwait:rt_sigtimedwait(const sigset_t*, struct siginfo_t*, struct timespec_t*, size_t) all
int ___rt_sigqueueinfo:rt_sigqueueinfo(pid_t, int, siginfo_t*) all int __rt_sigqueueinfo:rt_sigqueueinfo(pid_t, int, siginfo_t*) all
int __signalfd4:signalfd4(int, const sigset_t*, size_t, int) all int __signalfd4:signalfd4(int, const sigset_t*, size_t, int) all
# sockets # sockets
@ -317,9 +309,6 @@ int inotify_rm_watch(int, unsigned int) all
int __pselect6:pselect6(int, fd_set*, fd_set*, fd_set*, timespec*, void*) all int __pselect6:pselect6(int, fd_set*, fd_set*, fd_set*, timespec*, void*) all
int __ppoll:ppoll(pollfd*, unsigned int, timespec*, const sigset_t*, size_t) all int __ppoll:ppoll(pollfd*, unsigned int, timespec*, const sigset_t*, size_t) all
ssize_t process_vm_readv(pid_t, const struct iovec*, unsigned long, const struct iovec*, unsigned long, unsigned long) all
ssize_t process_vm_writev(pid_t, const struct iovec*, unsigned long, const struct iovec*, unsigned long, unsigned long) all
int __set_tid_address:set_tid_address(int*) all int __set_tid_address:set_tid_address(int*) all
int setfsgid(gid_t) all int setfsgid(gid_t) all
@ -342,7 +331,7 @@ int __set_tls:set_thread_area(void*) mips,mips64
int __set_thread_area:set_thread_area(void*) x86 int __set_thread_area:set_thread_area(void*) x86
# vdso stuff. # vdso stuff.
int clock_gettime(clockid_t, timespec*) arm,mips,mips64 int clock_gettime(clockid_t, timespec*) arm,mips,mips64,x86
int __clock_gettime:clock_gettime(clockid_t, timespec*) arm64,x86,x86_64 int __clock_gettime:clock_gettime(clockid_t, timespec*) arm64,x86_64
int gettimeofday(timeval*, timezone*) arm,mips,mips64 int gettimeofday(timeval*, timezone*) arm,mips,mips64,x86
int __gettimeofday:gettimeofday(timeval*, timezone*) arm64,x86,x86_64 int __gettimeofday:gettimeofday(timeval*, timezone*) arm64,x86_64

View File

@ -1,19 +1,33 @@
# 32-bit arm. # 32-bit arm.
#
# Default implementations of functions that are commonly optimized.
#
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \ bionic/strchr.cpp \
arch-arm/generic/bionic/memcpy.S \ bionic/strnlen.c \
arch-arm/generic/bionic/memset.S \ bionic/strrchr.cpp \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
libc_bionic_src_files_exclude_arm += \ libc_freebsd_src_files_arm += \
bionic/__memcpy_chk.cpp \ upstream-freebsd/lib/libc/string/wcscat.c \
bionic/__memset_chk.cpp \ upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_exclude_arm += \ libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/string/strcpy.c \ upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
upstream-openbsd/lib/libc/string/strncat.c \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
# #
# Inherently architecture-specific code. # Inherently architecture-specific code.
@ -25,11 +39,10 @@ libc_bionic_src_files_arm += \
arch-arm/bionic/__bionic_clone.S \ arch-arm/bionic/__bionic_clone.S \
arch-arm/bionic/_exit_with_stack_teardown.S \ arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/libgcc_compat.c \ arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/popcount_tab.c \ arch-arm/bionic/libgcc_protect_unwind.c \
arch-arm/bionic/__restore.S \ arch-arm/bionic/__restore.S \
arch-arm/bionic/setjmp.S \ arch-arm/bionic/setjmp.S \
arch-arm/bionic/syscall.S \ arch-arm/bionic/syscall.S \
arch-arm/bionic/vfork.S \
libc_arch_static_src_files_arm := arch-arm/bionic/exidx_static.c libc_arch_static_src_files_arm := arch-arm/bionic/exidx_static.c
libc_arch_dynamic_src_files_arm := arch-arm/bionic/exidx_dynamic.c libc_arch_dynamic_src_files_arm := arch-arm/bionic/exidx_dynamic.c
@ -38,7 +51,6 @@ libc_arch_dynamic_src_files_arm := arch-arm/bionic/exidx_dynamic.c
ifeq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),) ifeq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),)
$(warning TARGET_$(my_2nd_arch_prefix)ARCH is arm, but TARGET_$(my_2nd_arch_prefix)CPU_VARIANT is not defined) $(warning TARGET_$(my_2nd_arch_prefix)ARCH is arm, but TARGET_$(my_2nd_arch_prefix)CPU_VARIANT is not defined)
endif endif
ifneq ($(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT),generic)
cpu_variant_mk := $(LOCAL_PATH)/arch-arm/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT).mk cpu_variant_mk := $(LOCAL_PATH)/arch-arm/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT).mk
ifeq ($(wildcard $(cpu_variant_mk)),) ifeq ($(wildcard $(cpu_variant_mk)),)
$(error "TARGET_$(my_2nd_arch_prefix)CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a7, cortex-a8, cortex-a9, cortex-a15, krait, denver. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.") $(error "TARGET_$(my_2nd_arch_prefix)CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a7, cortex-a8, cortex-a9, cortex-a15, krait, denver. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
@ -47,7 +59,6 @@ include $(cpu_variant_mk)
libc_common_additional_dependencies += $(cpu_variant_mk) libc_common_additional_dependencies += $(cpu_variant_mk)
cpu_variant_mk := cpu_variant_mk :=
endif
libc_crt_target_cflags_arm := \ libc_crt_target_cflags_arm := \

View File

@ -51,62 +51,34 @@ extern int __cxa_atexit(void (*)(void*), void*, void*);
*/ */
int __attribute__((weak)) int __attribute__((weak))
__aeabi_atexit_impl(void *object, void (*destructor) (void *), void *dso_handle) { __aeabi_atexit(void *object, void (*destructor) (void *), void *dso_handle) {
return __cxa_atexit(destructor, object, dso_handle);
}
int __attribute__((weak))
__aeabi_atexit_impl2(void *object, void (*destructor) (void *), void *dso_handle) {
return __cxa_atexit(destructor, object, dso_handle); return __cxa_atexit(destructor, object, dso_handle);
} }
void __attribute__((weak)) __aeabi_memcpy8_impl(void *dest, const void *src, size_t n) { void __attribute__((weak))
__aeabi_memcpy8(void *dest, const void *src, size_t n) {
memcpy(dest, src, n); memcpy(dest, src, n);
} }
void __attribute__((weak)) __aeabi_memcpy4_impl(void *dest, const void *src, size_t n) { void __attribute__((weak)) __aeabi_memcpy4(void *dest, const void *src, size_t n) {
memcpy(dest, src, n); memcpy(dest, src, n);
} }
void __attribute__((weak)) __aeabi_memcpy_impl(void *dest, const void *src, size_t n) { void __attribute__((weak)) __aeabi_memcpy(void *dest, const void *src, size_t n) {
memcpy(dest, src, n);
}
void __attribute__((weak)) __aeabi_memcpy8_impl2(void *dest, const void *src, size_t n) {
memcpy(dest, src, n);
}
void __attribute__((weak)) __aeabi_memcpy4_impl2(void *dest, const void *src, size_t n) {
memcpy(dest, src, n);
}
void __attribute__((weak)) __aeabi_memcpy_impl2(void *dest, const void *src, size_t n) {
memcpy(dest, src, n); memcpy(dest, src, n);
} }
void __attribute__((weak)) __aeabi_memmove8_impl(void *dest, const void *src, size_t n) { void __attribute__((weak)) __aeabi_memmove8(void *dest, const void *src, size_t n) {
memmove(dest, src, n); memmove(dest, src, n);
} }
void __attribute__((weak)) __aeabi_memmove4_impl(void *dest, const void *src, size_t n) { void __attribute__((weak)) __aeabi_memmove4(void *dest, const void *src, size_t n) {
memmove(dest, src, n); memmove(dest, src, n);
} }
void __attribute__((weak)) __aeabi_memmove_impl(void *dest, const void *src, size_t n) { void __attribute__((weak)) __aeabi_memmove(void *dest, const void *src, size_t n) {
memmove(dest, src, n);
}
void __attribute__((weak)) __aeabi_memmove8_impl2(void *dest, const void *src, size_t n) {
memmove(dest, src, n);
}
void __attribute__((weak)) __aeabi_memmove4_impl2(void *dest, const void *src, size_t n) {
memmove(dest, src, n);
}
void __attribute__((weak)) __aeabi_memmove_impl2(void *dest, const void *src, size_t n) {
memmove(dest, src, n); memmove(dest, src, n);
} }
@ -115,71 +87,27 @@ void __attribute__((weak)) __aeabi_memmove_impl2(void *dest, const void *src, si
* This allows __aeabi_memclr to tail-call __aeabi_memset * This allows __aeabi_memclr to tail-call __aeabi_memset
*/ */
void __attribute__((weak)) __aeabi_memset8_impl(void *dest, size_t n, int c) { void __attribute__((weak)) __aeabi_memset8(void *dest, size_t n, int c) {
memset(dest, c, n); memset(dest, c, n);
} }
void __attribute__((weak)) __aeabi_memset4_impl(void *dest, size_t n, int c) { void __attribute__((weak)) __aeabi_memset4(void *dest, size_t n, int c) {
memset(dest, c, n); memset(dest, c, n);
} }
void __attribute__((weak)) __aeabi_memset_impl(void *dest, size_t n, int c) { void __attribute__((weak)) __aeabi_memset(void *dest, size_t n, int c) {
memset(dest, c, n);
}
void __attribute__((weak)) __aeabi_memset8_impl2(void *dest, size_t n, int c) {
memset(dest, c, n);
}
void __attribute__((weak)) __aeabi_memset4_impl2(void *dest, size_t n, int c) {
memset(dest, c, n);
}
void __attribute__((weak)) __aeabi_memset_impl2(void *dest, size_t n, int c) {
memset(dest, c, n); memset(dest, c, n);
} }
void __attribute__((weak)) __aeabi_memclr8_impl(void *dest, size_t n) { void __attribute__((weak)) __aeabi_memclr8(void *dest, size_t n) {
__aeabi_memset8_impl(dest, n, 0); __aeabi_memset8(dest, n, 0);
} }
void __attribute__((weak)) __aeabi_memclr4_impl(void *dest, size_t n) { void __attribute__((weak)) __aeabi_memclr4(void *dest, size_t n) {
__aeabi_memset4_impl(dest, n, 0); __aeabi_memset4(dest, n, 0);
} }
void __attribute__((weak)) __aeabi_memclr_impl(void *dest, size_t n) { void __attribute__((weak)) __aeabi_memclr(void *dest, size_t n) {
__aeabi_memset_impl(dest, n, 0); __aeabi_memset(dest, n, 0);
} }
void __attribute__((weak)) __aeabi_memclr8_impl2(void *dest, size_t n) {
__aeabi_memset8_impl(dest, n, 0);
}
void __attribute__((weak)) __aeabi_memclr4_impl2(void *dest, size_t n) {
__aeabi_memset4_impl(dest, n, 0);
}
void __attribute__((weak)) __aeabi_memclr_impl2(void *dest, size_t n) {
__aeabi_memset_impl(dest, n, 0);
}
#define __AEABI_SYMVERS(fn_name) \
__asm__(".symver " #fn_name "_impl, " #fn_name "@@LIBC_N"); \
__asm__(".symver " #fn_name "_impl2, " #fn_name "@LIBC_PRIVATE")
__AEABI_SYMVERS(__aeabi_atexit);
__AEABI_SYMVERS(__aeabi_memcpy8);
__AEABI_SYMVERS(__aeabi_memcpy4);
__AEABI_SYMVERS(__aeabi_memcpy);
__AEABI_SYMVERS(__aeabi_memmove8);
__AEABI_SYMVERS(__aeabi_memmove4);
__AEABI_SYMVERS(__aeabi_memmove);
__AEABI_SYMVERS(__aeabi_memset8);
__AEABI_SYMVERS(__aeabi_memset4);
__AEABI_SYMVERS(__aeabi_memset);
__AEABI_SYMVERS(__aeabi_memclr8);
__AEABI_SYMVERS(__aeabi_memclr4);
__AEABI_SYMVERS(__aeabi_memclr);
#undef __AEABI_SYMVERS

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2008 The Android Open Source Project * Copyright (C) 2008-2010 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without

View File

@ -34,9 +34,7 @@
// __restore_rt (but covered by the .fnstart/.fnend) so that although they're // __restore_rt (but covered by the .fnstart/.fnend) so that although they're
// not inside the functions from objdump's point of view, an unwinder that // not inside the functions from objdump's point of view, an unwinder that
// blindly looks at the previous instruction (but is then smart enough to check // blindly looks at the previous instruction (but is then smart enough to check
// the unwind information to find out where it landed) gets the right answer. // the DWARF information to find out where it landed) gets the right answer.
// Make sure not to have both DWARF and ARM unwind information, so only
// use the ARM unwind information.
// We need to place .fnstart ourselves (but we may as well keep the free .fnend). // We need to place .fnstart ourselves (but we may as well keep the free .fnend).
#undef __bionic_asm_custom_entry #undef __bionic_asm_custom_entry
@ -46,18 +44,18 @@
.save {r0-r15} .save {r0-r15}
.pad #32 .pad #32
nop nop
ENTRY_PRIVATE_NO_DWARF(__restore) ENTRY_PRIVATE(__restore)
// This function must have exactly this instruction sequence. // This function must have exactly this instruction sequence.
mov r7, #__NR_sigreturn mov r7, #__NR_sigreturn
swi #0 swi #0
END_NO_DWARF(__restore) END(__restore)
.fnstart .fnstart
.save {r0-r15} .save {r0-r15}
.pad #160 .pad #160
nop nop
ENTRY_PRIVATE_NO_DWARF(__restore_rt) ENTRY_PRIVATE(__restore_rt)
// This function must have exactly this instruction sequence. // This function must have exactly this instruction sequence.
mov r7, #__NR_rt_sigreturn mov r7, #__NR_rt_sigreturn
swi #0 swi #0
END_NO_DWARF(__restore_rt) END(__restore_rt)

View File

@ -37,13 +37,7 @@
* the expectation that libc will define it and call through to * the expectation that libc will define it and call through to
* a differently-named function in the dynamic linker. * a differently-named function in the dynamic linker.
*/ */
_Unwind_Ptr __gnu_Unwind_Find_exidx_impl(_Unwind_Ptr pc, int *pcount) { _Unwind_Ptr __gnu_Unwind_Find_exidx(_Unwind_Ptr pc, int *pcount)
{
return dl_unwind_find_exidx(pc, pcount); return dl_unwind_find_exidx(pc, pcount);
} }
_Unwind_Ptr __gnu_Unwind_Find_exidx_impl2(_Unwind_Ptr pc, int *pcount) {
return dl_unwind_find_exidx(pc, pcount);
}
__asm__(".symver __gnu_Unwind_Find_exidx_impl,__gnu_Unwind_Find_exidx@LIBC_PRIVATE");
__asm__(".symver __gnu_Unwind_Find_exidx_impl2,__gnu_Unwind_Find_exidx@@LIBC_N");

View File

@ -0,0 +1,93 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO: This file should go away once unwinder migration to libc++.so is complete.
extern char _Unwind_Backtrace __attribute((visibility("protected")));
extern char __gnu_Unwind_Find_exidx __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP_D __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP_D_16_to_31 __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_WMMXD __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_WMMXC __attribute((visibility("protected")));
extern char _Unwind_GetCFA __attribute((visibility("protected")));
extern char __gnu_Unwind_RaiseException __attribute((visibility("protected")));
extern char __gnu_Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char __gnu_Unwind_Resume __attribute((visibility("protected")));
extern char __gnu_Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char _Unwind_Complete __attribute((visibility("protected")));
extern char _Unwind_DeleteException __attribute((visibility("protected")));
extern char _Unwind_VRS_Get __attribute((visibility("protected")));
extern char _Unwind_VRS_Set __attribute((visibility("protected")));
extern char __gnu_Unwind_Backtrace __attribute((visibility("protected")));
extern char _Unwind_VRS_Pop __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP_D __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP_D_16_to_31 __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_WMMXD __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_WMMXC __attribute((visibility("protected")));
extern char ___Unwind_RaiseException __attribute((visibility("protected")));
extern char _Unwind_RaiseException __attribute((visibility("protected")));
extern char ___Unwind_Resume __attribute((visibility("protected")));
extern char _Unwind_Resume __attribute((visibility("protected")));
extern char ___Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char _Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char ___Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char _Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char ___Unwind_Backtrace __attribute((visibility("protected")));
extern char _Unwind_GetRegionStart __attribute((visibility("protected")));
extern char _Unwind_GetLanguageSpecificData __attribute((visibility("protected")));
extern char _Unwind_GetDataRelBase __attribute((visibility("protected")));
extern char _Unwind_GetTextRelBase __attribute((visibility("protected")));
void* __bionic_libgcc_unwind_symbols[] = {
&_Unwind_Backtrace,
&__gnu_Unwind_Find_exidx,
&__gnu_Unwind_Restore_VFP_D,
&__gnu_Unwind_Restore_VFP,
&__gnu_Unwind_Restore_VFP_D_16_to_31,
&__gnu_Unwind_Restore_WMMXD,
&__gnu_Unwind_Restore_WMMXC,
&_Unwind_GetCFA,
&__gnu_Unwind_RaiseException,
&__gnu_Unwind_ForcedUnwind,
&__gnu_Unwind_Resume,
&__gnu_Unwind_Resume_or_Rethrow,
&_Unwind_Complete,
&_Unwind_DeleteException,
&_Unwind_VRS_Get,
&_Unwind_VRS_Set,
&__gnu_Unwind_Backtrace,
&_Unwind_VRS_Pop,
&__gnu_Unwind_Save_VFP_D,
&__gnu_Unwind_Save_VFP,
&__gnu_Unwind_Save_VFP_D_16_to_31,
&__gnu_Unwind_Save_WMMXD,
&__gnu_Unwind_Save_WMMXC,
&___Unwind_RaiseException,
&_Unwind_RaiseException,
&___Unwind_Resume,
&_Unwind_Resume,
&___Unwind_Resume_or_Rethrow,
&_Unwind_Resume_or_Rethrow,
&___Unwind_ForcedUnwind,
&_Unwind_ForcedUnwind,
&___Unwind_Backtrace,
&_Unwind_GetRegionStart,
&_Unwind_GetLanguageSpecificData,
&_Unwind_GetDataRelBase,
&_Unwind_GetTextRelBase,
};

View File

@ -1,42 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// Export this to maintain ABI compatibilty with libgcc, since compiler-rt
// doesn't use a table-driven implementation of __popcount.
const unsigned char __popcount_tab[256] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3,
3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4,
4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2,
2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,
4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5,
5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5,
5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};

View File

@ -52,7 +52,7 @@
// Current layout (may change in the future): // Current layout (may change in the future):
// //
// word name description // word name description
// 0 sigflag/cookie setjmp cookie in top 31 bits, signal mask flag in low bit // 0 magic magic number
// 1 sigmask signal mask (not used with _setjmp / _longjmp) // 1 sigmask signal mask (not used with _setjmp / _longjmp)
// 2 float_base base of float registers (d8 to d15) // 2 float_base base of float registers (d8 to d15)
// 18 float_state floating-point status and control register // 18 float_state floating-point status and control register
@ -80,82 +80,33 @@ ENTRY(_setjmp)
b sigsetjmp b sigsetjmp
END(_setjmp) END(_setjmp)
#define MANGLE_REGISTERS 1
.macro m_mangle_registers reg
#if MANGLE_REGISTERS
eor r4, r4, \reg
eor r5, r5, \reg
eor r6, r6, \reg
eor r7, r7, \reg
eor r8, r8, \reg
eor r9, r9, \reg
eor r10, r10, \reg
eor r11, r11, \reg
eor r12, r12, \reg
eor r13, r13, \reg
eor r14, r14, \reg
#endif
.endm
.macro m_unmangle_registers reg
m_mangle_registers \reg
.endm
// int sigsetjmp(sigjmp_buf env, int save_signal_mask); // int sigsetjmp(sigjmp_buf env, int save_signal_mask);
ENTRY(sigsetjmp) ENTRY(sigsetjmp)
stmfd sp!, {r0, lr} // Record whether or not we're saving the signal mask.
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
mov r0, r1
bl __bionic_setjmp_cookie_get
mov r1, r0
ldmfd sp, {r0}
// Save the setjmp cookie for later.
bic r2, r1, #1
stmfd sp!, {r2}
.cfi_adjust_cfa_offset 4
// Record the setjmp cookie and whether or not we're saving the signal mask.
str r1, [r0, #(_JB_SIGFLAG * 4)] str r1, [r0, #(_JB_SIGFLAG * 4)]
// Do we need to save the signal mask? // Do we need to save the signal mask?
tst r1, #1 teq r1, #0
beq 1f beq 1f
// Align the stack. // Get current signal mask.
sub sp, #4 stmfd sp!, {r0, r14}
.cfi_adjust_cfa_offset 4 .cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset r14, 4
mov r0, #0
bl sigblock
mov r1, r0
ldmfd sp!, {r0, r14}
.cfi_def_cfa_offset 0
// Save the current signal mask. // Save the signal mask.
add r2, r0, #(_JB_SIGMASK * 4) str r1, [r0, #(_JB_SIGMASK * 4)]
mov r0, #2 // SIG_SETMASK
mov r1, #0
bl sigprocmask
// Unalign the stack.
add sp, #4
.cfi_adjust_cfa_offset -4
1: 1:
ldmfd sp!, {r2}
.cfi_adjust_cfa_offset -4
ldmfd sp!, {r0, lr}
.cfi_adjust_cfa_offset -8
.cfi_restore r0
.cfi_restore lr
// Save core registers. // Save core registers.
add r1, r0, #(_JB_CORE_BASE * 4) add r1, r0, #(_JB_CORE_BASE * 4)
m_mangle_registers r2 stmia r1, {r4-r14}
// ARM deprecates using sp in the register list for stmia.
stmia r1, {r4-r12, lr}
str sp, [r1, #(10 * 4)]
m_unmangle_registers r2
// Save floating-point registers. // Save floating-point registers.
add r1, r0, #(_JB_FLOAT_BASE * 4) add r1, r0, #(_JB_FLOAT_BASE * 4)
@ -171,30 +122,29 @@ END(sigsetjmp)
// void siglongjmp(sigjmp_buf env, int value); // void siglongjmp(sigjmp_buf env, int value);
ENTRY(siglongjmp) ENTRY(siglongjmp)
stmfd sp!, {r0, r1, lr}
.cfi_def_cfa_offset 12
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset lr, 8
// Fetch the signal flag.
ldr r1, [r0, #(_JB_SIGFLAG * 4)]
// Do we need to restore the signal mask? // Do we need to restore the signal mask?
ands r1, r1, #1 ldr r2, [r0, #(_JB_SIGFLAG * 4)]
teq r2, #0
beq 1f beq 1f
// Restore the signal mask. // Restore the signal mask.
stmfd sp!, {r0, r1, r14}
.cfi_def_cfa_offset 12
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset r14, 8
sub sp, sp, #4 // Align the stack.
.cfi_adjust_cfa_offset 4
ldr r0, [r0, #(_JB_SIGMASK * 4)] ldr r0, [r0, #(_JB_SIGMASK * 4)]
bl sigsetmask bl sigsetmask
1: add sp, sp, #4 // Unalign the stack.
ldmfd sp!, {r0, r1, lr} .cfi_adjust_cfa_offset -4
.cfi_adjust_cfa_offset -12 ldmfd sp!, {r0, r1, r14}
.cfi_restore r0 .cfi_def_cfa_offset 0
.cfi_restore r1
.cfi_restore lr
1:
// Restore floating-point registers. // Restore floating-point registers.
add r2, r0, #(_JB_FLOAT_BASE * 4) add r2, r0, #(_JB_FLOAT_BASE * 4)
vldmia r2, {d8-d15} vldmia r2, {d8-d15}
@ -204,27 +154,16 @@ ENTRY(siglongjmp)
fmxr fpscr, r2 fmxr fpscr, r2
// Restore core registers. // Restore core registers.
ldr r3, [r0, #(_JB_SIGFLAG * 4)]
bic r3, r3, #1
add r2, r0, #(_JB_CORE_BASE * 4) add r2, r0, #(_JB_CORE_BASE * 4)
ldmia r2, {r4-r14}
// ARM deprecates using sp in the register list for ldmia. // Validate sp and r14.
ldmia r2, {r4-r12, lr} teq sp, #0
ldr sp, [r2, #(10 * 4)] teqne r14, #0
m_unmangle_registers r3 bleq longjmperror
// Save the return value/address and check the setjmp cookie.
stmfd sp!, {r1, lr}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset lr, 4
mov r0, r3
bl __bionic_setjmp_cookie_check
// Restore return value/address.
ldmfd sp!, {r0, lr}
.cfi_adjust_cfa_offset -8
.cfi_restore lr
// Set return value.
mov r0, r1
teq r0, #0 teq r0, #0
moveq r0, #1 moveq r0, #1
bx lr bx lr

View File

@ -1,46 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
ENTRY(vfork)
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
mrc p15, 0, r3, c13, c0, 3
ldr r3, [r3, #4]
mov r0, #0
str r0, [r3, #12]
mov ip, r7
ldr r7, =__NR_vfork
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(vfork)

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2015 The Android Open Source Project * Copyright (C) 2013 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -26,7 +26,191 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
// Indicate which memcpy base file to include. #include <private/bionic_asm.h>
#define MEMCPY_BASE "memcpy_base.S" #include <private/libc_events.h>
#include "__strcat_chk_common.S" .syntax unified
.thumb
.thumb_func
// Get the length of src string, then get the source of the dst string.
// Check that the two lengths together don't exceed the threshold, then
// do a memcpy of the data.
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
mov lr, r2
// Save the dst register to r5
mov r5, r0
// Zero out r4
eor r4, r4, r4
// r1 contains the address of the string to count.
.L_strlen_start:
mov r0, r1
ands r3, r1, #7
beq .L_mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_align_to_32
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
.L_align_to_32:
bcc .L_align_to_64
ands ip, r3, #2
beq .L_align_to_64
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
.L_align_to_64:
tst r3, #4
beq .L_mainloop
ldr r3, [r1], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
.p2align 2
.L_mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
b .L_mainloop
.L_update_count_and_finish:
sub r3, r1, r0
sub r3, r3, #1
b .L_finish
.L_zero_in_first_register:
sub r3, r1, r0
lsls r2, ip, #17
bne .L_sub8_and_finish
bcs .L_sub7_and_finish
lsls ip, ip, #1
bne .L_sub6_and_finish
sub r3, r3, #5
b .L_finish
.L_sub8_and_finish:
sub r3, r3, #8
b .L_finish
.L_sub7_and_finish:
sub r3, r3, #7
b .L_finish
.L_sub6_and_finish:
sub r3, r3, #6
b .L_finish
.L_zero_in_second_register:
sub r3, r1, r0
lsls r2, ip, #17
bne .L_sub4_and_finish
bcs .L_sub3_and_finish
lsls ip, ip, #1
bne .L_sub2_and_finish
sub r3, r3, #1
b .L_finish
.L_sub4_and_finish:
sub r3, r3, #4
b .L_finish
.L_sub3_and_finish:
sub r3, r3, #3
b .L_finish
.L_sub2_and_finish:
sub r3, r3, #2
.L_finish:
cmp r4, #0
bne .L_strlen_done
// Time to get the dst string length.
mov r1, r5
// Save the original source address to r5.
mov r5, r0
// Save the current length (adding 1 for the terminator).
add r4, r3, #1
b .L_strlen_start
// r0 holds the pointer to the dst string.
// r3 holds the dst string length.
// r4 holds the src string length + 1.
.L_strlen_done:
add r2, r3, r4
cmp r2, lr
bhi __strcat_chk_failed
// Set up the registers for the memcpy code.
mov r1, r5
pld [r1, #64]
mov r2, r4
add r0, r0, r3
pop {r4, r5}
END(__strcat_chk)
#define MEMCPY_BASE __strcat_chk_memcpy_base
#define MEMCPY_BASE_ALIGNED __strcat_chk_memcpy_base_aligned
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcat_chk_failed)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+4)
END(__strcat_chk_failed)
.data
error_string:
.string "strcat: prevented write past end of buffer"

View File

@ -1,212 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
#include <private/libc_events.h>
.syntax unified
.thumb
.thumb_func
// Get the length of src string, then get the source of the dst string.
// Check that the two lengths together don't exceed the threshold, then
// do a memcpy of the data.
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
mov lr, r2
// Save the dst register to r5
mov r5, r0
// Zero out r4
eor r4, r4, r4
// r1 contains the address of the string to count.
.L_strlen_start:
mov r0, r1
ands r3, r1, #7
beq .L_mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_align_to_32
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
.L_align_to_32:
bcc .L_align_to_64
ands ip, r3, #2
beq .L_align_to_64
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_finish
.L_align_to_64:
tst r3, #4
beq .L_mainloop
ldr r3, [r1], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
.p2align 2
.L_mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
b .L_mainloop
.L_update_count_and_finish:
sub r3, r1, r0
sub r3, r3, #1
b .L_finish
.L_zero_in_first_register:
sub r3, r1, r0
lsls r2, ip, #17
bne .L_sub8_and_finish
bcs .L_sub7_and_finish
lsls ip, ip, #1
bne .L_sub6_and_finish
sub r3, r3, #5
b .L_finish
.L_sub8_and_finish:
sub r3, r3, #8
b .L_finish
.L_sub7_and_finish:
sub r3, r3, #7
b .L_finish
.L_sub6_and_finish:
sub r3, r3, #6
b .L_finish
.L_zero_in_second_register:
sub r3, r1, r0
lsls r2, ip, #17
bne .L_sub4_and_finish
bcs .L_sub3_and_finish
lsls ip, ip, #1
bne .L_sub2_and_finish
sub r3, r3, #1
b .L_finish
.L_sub4_and_finish:
sub r3, r3, #4
b .L_finish
.L_sub3_and_finish:
sub r3, r3, #3
b .L_finish
.L_sub2_and_finish:
sub r3, r3, #2
.L_finish:
cmp r4, #0
bne .L_strlen_done
// Time to get the dst string length.
mov r1, r5
// Save the original source address to r5.
mov r5, r0
// Save the current length (adding 1 for the terminator).
add r4, r3, #1
b .L_strlen_start
// r0 holds the pointer to the dst string.
// r3 holds the dst string length.
// r4 holds the src string length + 1.
.L_strlen_done:
add r2, r3, r4
cmp r2, lr
bhi .L_strcat_chk_failed
// Set up the registers for the memcpy code.
mov r1, r5
pld [r1, #64]
mov r2, r4
add r0, r0, r3
pop {r4, r5}
.cfi_adjust_cfa_offset -8
.cfi_restore r4
.cfi_restore r5
#include MEMCPY_BASE
// Undo the above cfi directives
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.L_strcat_chk_failed:
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+4)
END(__strcat_chk)
.data
error_string:
.string "strcat: prevented write past end of buffer"

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2015 The Android Open Source Project * Copyright (C) 2013 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -26,7 +26,155 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
// Indicate which memcpy base file to include. #include <private/bionic_asm.h>
#define MEMCPY_BASE "memcpy_base.S" #include <private/libc_events.h>
#include "__strcpy_chk_common.S" .syntax unified
.thumb
.thumb_func
// Get the length of the source string first, then do a memcpy of the data
// instead of a strcpy.
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
mov lr, r2
mov r0, r1
ands r3, r1, #7
beq .L_mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_align_to_32
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
.L_align_to_32:
bcc .L_align_to_64
ands ip, r3, #2
beq .L_align_to_64
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
.L_align_to_64:
tst r3, #4
beq .L_mainloop
ldr r3, [r0], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
.p2align 2
.L_mainloop:
ldrd r2, r3, [r0], #8
pld [r0, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
b .L_mainloop
.L_update_count_and_finish:
sub r3, r0, r1
sub r3, r3, #1
b .L_check_size
.L_zero_in_first_register:
sub r3, r0, r1
lsls r2, ip, #17
bne .L_sub8_and_finish
bcs .L_sub7_and_finish
lsls ip, ip, #1
bne .L_sub6_and_finish
sub r3, r3, #5
b .L_check_size
.L_sub8_and_finish:
sub r3, r3, #8
b .L_check_size
.L_sub7_and_finish:
sub r3, r3, #7
b .L_check_size
.L_sub6_and_finish:
sub r3, r3, #6
b .L_check_size
.L_zero_in_second_register:
sub r3, r0, r1
lsls r2, ip, #17
bne .L_sub4_and_finish
bcs .L_sub3_and_finish
lsls ip, ip, #1
bne .L_sub2_and_finish
sub r3, r3, #1
b .L_check_size
.L_sub4_and_finish:
sub r3, r3, #4
b .L_check_size
.L_sub3_and_finish:
sub r3, r3, #3
b .L_check_size
.L_sub2_and_finish:
sub r3, r3, #2
.L_check_size:
pld [r1, #0]
pld [r1, #64]
ldr r0, [sp]
cmp r3, lr
bhs __strcpy_chk_failed
// Add 1 for copy length to get the string terminator.
add r2, r3, #1
END(__strcpy_chk)
#define MEMCPY_BASE __strcpy_chk_memcpy_base
#define MEMCPY_BASE_ALIGNED __strcpy_chk_memcpy_base_aligned
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcpy_chk_failed)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+4)
END(__strcpy_chk_failed)
.data
error_string:
.string "strcpy: prevented write past end of buffer"

View File

@ -1,173 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
#include <private/libc_events.h>
.syntax unified
.thumb
.thumb_func
// Get the length of the source string first, then do a memcpy of the data
// instead of a strcpy.
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
mov lr, r2
mov r0, r1
ands r3, r1, #7
beq .L_mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_align_to_32
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
.L_align_to_32:
bcc .L_align_to_64
ands ip, r3, #2
beq .L_align_to_64
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
ldrb r2, [r0], #1
cbz r2, .L_update_count_and_finish
.L_align_to_64:
tst r3, #4
beq .L_mainloop
ldr r3, [r0], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
.p2align 2
.L_mainloop:
ldrd r2, r3, [r0], #8
pld [r0, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
b .L_mainloop
.L_update_count_and_finish:
sub r3, r0, r1
sub r3, r3, #1
b .L_check_size
.L_zero_in_first_register:
sub r3, r0, r1
lsls r2, ip, #17
bne .L_sub8_and_finish
bcs .L_sub7_and_finish
lsls ip, ip, #1
bne .L_sub6_and_finish
sub r3, r3, #5
b .L_check_size
.L_sub8_and_finish:
sub r3, r3, #8
b .L_check_size
.L_sub7_and_finish:
sub r3, r3, #7
b .L_check_size
.L_sub6_and_finish:
sub r3, r3, #6
b .L_check_size
.L_zero_in_second_register:
sub r3, r0, r1
lsls r2, ip, #17
bne .L_sub4_and_finish
bcs .L_sub3_and_finish
lsls ip, ip, #1
bne .L_sub2_and_finish
sub r3, r3, #1
b .L_check_size
.L_sub4_and_finish:
sub r3, r3, #4
b .L_check_size
.L_sub3_and_finish:
sub r3, r3, #3
b .L_check_size
.L_sub2_and_finish:
sub r3, r3, #2
.L_check_size:
pld [r1, #0]
pld [r1, #64]
ldr r0, [sp]
cmp r3, lr
bhs .L_strcpy_chk_failed
// Add 1 for copy length to get the string terminator.
add r2, r3, #1
#include MEMCPY_BASE
.L_strcpy_chk_failed:
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+4)
END(__strcpy_chk)
.data
error_string:
.string "strcpy: prevented write past end of buffer"

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2015 The Android Open Source Project * Copyright (C) 2008 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -25,8 +25,79 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Indicate which memcpy base file to include. // Prototype: void *memcpy (void *dst, const void *src, size_t count).
#define MEMCPY_BASE "memcpy_base.S"
#include "memcpy_common.S" #include <private/bionic_asm.h>
#include <private/libc_events.h>
.text
.syntax unified
.fpu neon
ENTRY(__memcpy_chk)
cmp r2, r3
bhi __memcpy_chk_fail
// Fall through to memcpy...
END(__memcpy_chk)
ENTRY(memcpy)
pld [r1, #64]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
END(memcpy)
#define MEMCPY_BASE __memcpy_base
#define MEMCPY_BASE_ALIGNED __memcpy_base_aligned
#include "memcpy_base.S"
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+8)
END(__memcpy_chk_fail)
.data
error_string:
.string "memcpy: prevented write past end of buffer"

View File

@ -53,7 +53,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
.L_memcpy_base: ENTRY_PRIVATE(MEMCPY_BASE)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
// Assumes that n >= 0, and dst, src are valid pointers. // Assumes that n >= 0, and dst, src are valid pointers.
// For any sizes less than 832 use the neon code that doesn't // For any sizes less than 832 use the neon code that doesn't
// care about the src alignment. This avoids any checks // care about the src alignment. This avoids any checks
@ -164,6 +168,12 @@
eor r3, r0, r1 eor r3, r0, r1
ands r3, r3, #0x3 ands r3, r3, #0x3
bne .L_copy_unknown_alignment bne .L_copy_unknown_alignment
END(MEMCPY_BASE)
ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
// To try and improve performance, stack layout changed, // To try and improve performance, stack layout changed,
// i.e., not keeping the stack looking like users expect // i.e., not keeping the stack looking like users expect
@ -175,7 +185,7 @@
strd r6, r7, [sp, #-8]! strd r6, r7, [sp, #-8]!
.cfi_adjust_cfa_offset 8 .cfi_adjust_cfa_offset 8
.cfi_rel_offset r6, 0 .cfi_rel_offset r6, 0
.cfi_rel_offset r7, 4 .cfi_rel_offset r7, 0
strd r8, r9, [sp, #-8]! strd r8, r9, [sp, #-8]!
.cfi_adjust_cfa_offset 8 .cfi_adjust_cfa_offset 8
.cfi_rel_offset r8, 0 .cfi_rel_offset r8, 0
@ -281,28 +291,10 @@
// Restore registers: optimized pop {r0, pc} // Restore registers: optimized pop {r0, pc}
ldrd r8, r9, [sp], #8 ldrd r8, r9, [sp], #8
.cfi_adjust_cfa_offset -8
.cfi_restore r8
.cfi_restore r9
ldrd r6, r7, [sp], #8 ldrd r6, r7, [sp], #8
.cfi_adjust_cfa_offset -8
.cfi_restore r6
.cfi_restore r7
ldrd r4, r5, [sp], #8 ldrd r4, r5, [sp], #8
.cfi_adjust_cfa_offset -8
.cfi_restore r4
.cfi_restore r5
pop {r0, pc} pop {r0, pc}
// Put the cfi directives back for the below instructions.
.cfi_adjust_cfa_offset 24
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
.cfi_rel_offset r8, 16
.cfi_rel_offset r9, 20
.L_dst_not_word_aligned: .L_dst_not_word_aligned:
// Align dst to word. // Align dst to word.
rsb ip, ip, #4 rsb ip, ip, #4
@ -323,12 +315,4 @@
// Src is guaranteed to be at least word aligned by this point. // Src is guaranteed to be at least word aligned by this point.
b .L_word_aligned b .L_word_aligned
END(MEMCPY_BASE_ALIGNED)
// Undo any cfi directives from above.
.cfi_adjust_cfa_offset -24
.cfi_restore r4
.cfi_restore r5
.cfi_restore r6
.cfi_restore r7
.cfi_restore r8
.cfi_restore r9

View File

@ -1,103 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
#include <private/libc_events.h>
.text
.syntax unified
.fpu neon
ENTRY(__memcpy_chk)
cmp r2, r3
bhi .L_memcpy_chk_fail
// Fall through to memcpy...
END(__memcpy_chk)
// Prototype: void *memcpy (void *dst, const void *src, size_t count).
ENTRY(memcpy)
pld [r1, #64]
push {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
#include MEMCPY_BASE
// Undo the cfi instructions from above.
.cfi_def_cfa_offset 0
.cfi_restore r0
.cfi_restore lr
.L_memcpy_chk_fail:
// Preserve lr for backtrace.
push {lr}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset lr, 0
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+8)
END(memcpy)
.data
error_string:
.string "memcpy: prevented write past end of buffer"

View File

@ -70,7 +70,7 @@
.macro m_scan_byte .macro m_scan_byte
ldrb r3, [r0] ldrb r3, [r0]
cbz r3, .L_strcat_r0_scan_done cbz r3, strcat_r0_scan_done
add r0, #1 add r0, #1
.endm // m_scan_byte .endm // m_scan_byte
@ -84,10 +84,10 @@ ENTRY(strcat)
// Quick check to see if src is empty. // Quick check to see if src is empty.
ldrb r2, [r1] ldrb r2, [r1]
pld [r1, #0] pld [r1, #0]
cbnz r2, .L_strcat_continue cbnz r2, strcat_continue
bx lr bx lr
.L_strcat_continue: strcat_continue:
// To speed up really small dst strings, unroll checking the first 4 bytes. // To speed up really small dst strings, unroll checking the first 4 bytes.
m_push m_push
m_scan_byte m_scan_byte
@ -96,102 +96,95 @@ ENTRY(strcat)
m_scan_byte m_scan_byte
ands r3, r0, #7 ands r3, r0, #7
beq .L_strcat_mainloop beq strcat_mainloop
// Align to a double word (64 bits). // Align to a double word (64 bits).
rsb r3, r3, #8 rsb r3, r3, #8
lsls ip, r3, #31 lsls ip, r3, #31
beq .L_strcat_align_to_32 beq strcat_align_to_32
ldrb r5, [r0] ldrb r5, [r0]
cbz r5, .L_strcat_r0_scan_done cbz r5, strcat_r0_scan_done
add r0, r0, #1 add r0, r0, #1
.L_strcat_align_to_32: strcat_align_to_32:
bcc .L_strcat_align_to_64 bcc strcat_align_to_64
ldrb r2, [r0] ldrb r2, [r0]
cbz r2, .L_strcat_r0_scan_done cbz r2, strcat_r0_scan_done
add r0, r0, #1 add r0, r0, #1
ldrb r4, [r0] ldrb r4, [r0]
cbz r4, .L_strcat_r0_scan_done cbz r4, strcat_r0_scan_done
add r0, r0, #1 add r0, r0, #1
.L_strcat_align_to_64: strcat_align_to_64:
tst r3, #4 tst r3, #4
beq .L_strcat_mainloop beq strcat_mainloop
ldr r3, [r0], #4 ldr r3, [r0], #4
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcat_zero_in_second_register bne strcat_zero_in_second_register
b .L_strcat_mainloop b strcat_mainloop
.L_strcat_r0_scan_done: strcat_r0_scan_done:
// For short copies, hard-code checking the first 8 bytes since this // For short copies, hard-code checking the first 8 bytes since this
// new code doesn't win until after about 8 bytes. // new code doesn't win until after about 8 bytes.
m_copy_byte reg=r2, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.L_strcpy_finish m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=.L_strcpy_continue m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
.L_strcpy_finish: strcpy_finish:
m_pop m_pop
.L_strcpy_continue: strcpy_continue:
ands r3, r0, #7 ands r3, r0, #7
beq .L_strcpy_check_src_align beq strcpy_check_src_align
// Align to a double word (64 bits). // Align to a double word (64 bits).
rsb r3, r3, #8 rsb r3, r3, #8
lsls ip, r3, #31 lsls ip, r3, #31
beq .L_strcpy_align_to_32 beq strcpy_align_to_32
ldrb r2, [r1], #1 ldrb r2, [r1], #1
strb r2, [r0], #1 strb r2, [r0], #1
cbz r2, .L_strcpy_complete cbz r2, strcpy_complete
.L_strcpy_align_to_32: strcpy_align_to_32:
bcc .L_strcpy_align_to_64 bcc strcpy_align_to_64
ldrb r2, [r1], #1 ldrb r2, [r1], #1
strb r2, [r0], #1 strb r2, [r0], #1
cbz r2, .L_strcpy_complete cbz r2, strcpy_complete
ldrb r2, [r1], #1 ldrb r2, [r1], #1
strb r2, [r0], #1 strb r2, [r0], #1
cbz r2, .L_strcpy_complete cbz r2, strcpy_complete
.L_strcpy_align_to_64: strcpy_align_to_64:
tst r3, #4 tst r3, #4
beq .L_strcpy_check_src_align beq strcpy_check_src_align
// Read one byte at a time since we don't know the src alignment ldr r2, [r1], #4
// and we don't want to read into a different page.
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
.L_strcpy_check_src_align: sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
str r2, [r0], #4
strcpy_check_src_align:
// At this point dst is aligned to a double word, check if src // At this point dst is aligned to a double word, check if src
// is also aligned to a double word. // is also aligned to a double word.
ands r3, r1, #7 ands r3, r1, #7
bne .L_strcpy_unaligned_copy bne strcpy_unaligned_copy
.p2align 2 .p2align 2
.L_strcpy_mainloop: strcpy_mainloop:
ldrd r2, r3, [r1], #8 ldrd r2, r3, [r1], #8
pld [r1, #64] pld [r1, #64]
@ -199,128 +192,128 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_mainloop b strcpy_mainloop
.L_strcpy_complete: strcpy_complete:
m_pop m_pop
.L_strcpy_zero_in_first_register: strcpy_zero_in_first_register:
lsls lr, ip, #17 lsls lr, ip, #17
bne .L_strcpy_copy1byte bne strcpy_copy1byte
bcs .L_strcpy_copy2bytes bcs strcpy_copy2bytes
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_strcpy_copy3bytes bne strcpy_copy3bytes
.L_strcpy_copy4bytes: strcpy_copy4bytes:
// Copy 4 bytes to the destiniation. // Copy 4 bytes to the destiniation.
str r2, [r0] str r2, [r0]
m_pop m_pop
.L_strcpy_copy1byte: strcpy_copy1byte:
strb r2, [r0] strb r2, [r0]
m_pop m_pop
.L_strcpy_copy2bytes: strcpy_copy2bytes:
strh r2, [r0] strh r2, [r0]
m_pop m_pop
.L_strcpy_copy3bytes: strcpy_copy3bytes:
strh r2, [r0], #2 strh r2, [r0], #2
lsr r2, #16 lsr r2, #16
strb r2, [r0] strb r2, [r0]
m_pop m_pop
.L_strcpy_zero_in_second_register: strcpy_zero_in_second_register:
lsls lr, ip, #17 lsls lr, ip, #17
bne .L_strcpy_copy5bytes bne strcpy_copy5bytes
bcs .L_strcpy_copy6bytes bcs strcpy_copy6bytes
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_strcpy_copy7bytes bne strcpy_copy7bytes
// Copy 8 bytes to the destination. // Copy 8 bytes to the destination.
strd r2, r3, [r0] strd r2, r3, [r0]
m_pop m_pop
.L_strcpy_copy5bytes: strcpy_copy5bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r3, [r0] strb r3, [r0]
m_pop m_pop
.L_strcpy_copy6bytes: strcpy_copy6bytes:
str r2, [r0], #4 str r2, [r0], #4
strh r3, [r0] strh r3, [r0]
m_pop m_pop
.L_strcpy_copy7bytes: strcpy_copy7bytes:
str r2, [r0], #4 str r2, [r0], #4
strh r3, [r0], #2 strh r3, [r0], #2
lsr r3, #16 lsr r3, #16
strb r3, [r0] strb r3, [r0]
m_pop m_pop
.L_strcpy_unaligned_copy: strcpy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment. // Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code // There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version // to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can // will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary. // be read without potentially crossing a page boundary.
tbb [pc, r3] tbb [pc, r3]
.L_strcpy_unaligned_branchtable: strcpy_unaligned_branchtable:
.byte 0 .byte 0
.byte ((.L_strcpy_unalign7 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign6 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign5 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign4 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign3 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign2 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign1 - .L_strcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
.p2align 2 .p2align 2
// Can read 7 bytes before possibly crossing a page. // Can read 7 bytes before possibly crossing a page.
.L_strcpy_unalign7: strcpy_unalign7:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r3, [r1] ldrb r3, [r1]
cbz r3, .L_strcpy_unalign7_copy5bytes cbz r3, strcpy_unalign7_copy5bytes
ldrb r4, [r1, #1] ldrb r4, [r1, #1]
cbz r4, .L_strcpy_unalign7_copy6bytes cbz r4, strcpy_unalign7_copy6bytes
ldrb r5, [r1, #2] ldrb r5, [r1, #2]
cbz r5, .L_strcpy_unalign7_copy7bytes cbz r5, strcpy_unalign7_copy7bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
lsrs ip, r3, #24 lsrs ip, r3, #24
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
beq .L_strcpy_unalign_return beq strcpy_unalign_return
b .L_strcpy_unalign7 b strcpy_unalign7
.L_strcpy_unalign7_copy5bytes: strcpy_unalign7_copy5bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r3, [r0] strb r3, [r0]
.L_strcpy_unalign_return: strcpy_unalign_return:
m_pop m_pop
.L_strcpy_unalign7_copy6bytes: strcpy_unalign7_copy6bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0], #1 strb r4, [r0], #1
m_pop m_pop
.L_strcpy_unalign7_copy7bytes: strcpy_unalign7_copy7bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0], #1 strb r4, [r0], #1
@ -329,41 +322,41 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 6 bytes before possibly crossing a page. // Can read 6 bytes before possibly crossing a page.
.L_strcpy_unalign6: strcpy_unalign6:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r4, [r1] ldrb r4, [r1]
cbz r4, .L_strcpy_unalign_copy5bytes cbz r4, strcpy_unalign_copy5bytes
ldrb r5, [r1, #1] ldrb r5, [r1, #1]
cbz r5, .L_strcpy_unalign_copy6bytes cbz r5, strcpy_unalign_copy6bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
tst r3, #0xff0000 tst r3, #0xff0000
beq .L_strcpy_copy7bytes beq strcpy_copy7bytes
lsrs ip, r3, #24 lsrs ip, r3, #24
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
beq .L_strcpy_unalign_return beq strcpy_unalign_return
b .L_strcpy_unalign6 b strcpy_unalign6
.p2align 2 .p2align 2
// Can read 5 bytes before possibly crossing a page. // Can read 5 bytes before possibly crossing a page.
.L_strcpy_unalign5: strcpy_unalign5:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r4, [r1] ldrb r4, [r1]
cbz r4, .L_strcpy_unalign_copy5bytes cbz r4, strcpy_unalign_copy5bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -372,17 +365,17 @@ ENTRY(strcat)
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_unalign5 b strcpy_unalign5
.L_strcpy_unalign_copy5bytes: strcpy_unalign_copy5bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r4, [r0] strb r4, [r0]
m_pop m_pop
.L_strcpy_unalign_copy6bytes: strcpy_unalign_copy6bytes:
str r2, [r0], #4 str r2, [r0], #4
strb r4, [r0], #1 strb r4, [r0], #1
strb r5, [r0] strb r5, [r0]
@ -390,13 +383,13 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 4 bytes before possibly crossing a page. // Can read 4 bytes before possibly crossing a page.
.L_strcpy_unalign4: strcpy_unalign4:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
@ -404,20 +397,20 @@ ENTRY(strcat)
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_unalign4 b strcpy_unalign4
.p2align 2 .p2align 2
// Can read 3 bytes before possibly crossing a page. // Can read 3 bytes before possibly crossing a page.
.L_strcpy_unalign3: strcpy_unalign3:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .L_strcpy_unalign3_copy1byte cbz r2, strcpy_unalign3_copy1byte
ldrb r3, [r1, #1] ldrb r3, [r1, #1]
cbz r3, .L_strcpy_unalign3_copy2bytes cbz r3, strcpy_unalign3_copy2bytes
ldrb r4, [r1, #2] ldrb r4, [r1, #2]
cbz r4, .L_strcpy_unalign3_copy3bytes cbz r4, strcpy_unalign3_copy3bytes
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -425,26 +418,26 @@ ENTRY(strcat)
pld [r1, #64] pld [r1, #64]
lsrs lr, r2, #24 lsrs lr, r2, #24
beq .L_strcpy_copy4bytes beq strcpy_copy4bytes
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_unalign3 b strcpy_unalign3
.L_strcpy_unalign3_copy1byte: strcpy_unalign3_copy1byte:
strb r2, [r0] strb r2, [r0]
m_pop m_pop
.L_strcpy_unalign3_copy2bytes: strcpy_unalign3_copy2bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r3, [r0] strb r3, [r0]
m_pop m_pop
.L_strcpy_unalign3_copy3bytes: strcpy_unalign3_copy3bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0] strb r4, [r0]
@ -452,34 +445,34 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 2 bytes before possibly crossing a page. // Can read 2 bytes before possibly crossing a page.
.L_strcpy_unalign2: strcpy_unalign2:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .L_strcpy_unalign_copy1byte cbz r2, strcpy_unalign_copy1byte
ldrb r4, [r1, #1] ldrb r4, [r1, #1]
cbz r4, .L_strcpy_unalign_copy2bytes cbz r4, strcpy_unalign_copy2bytes
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
tst r2, #0xff0000 tst r2, #0xff0000
beq .L_strcpy_copy3bytes beq strcpy_copy3bytes
lsrs ip, r2, #24 lsrs ip, r2, #24
beq .L_strcpy_copy4bytes beq strcpy_copy4bytes
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_unalign2 b strcpy_unalign2
.p2align 2 .p2align 2
// Can read 1 byte before possibly crossing a page. // Can read 1 byte before possibly crossing a page.
.L_strcpy_unalign1: strcpy_unalign1:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .L_strcpy_unalign_copy1byte cbz r2, strcpy_unalign_copy1byte
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -489,27 +482,27 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8 strd r2, r3, [r0], #8
b .L_strcpy_unalign1 b strcpy_unalign1
.L_strcpy_unalign_copy1byte: strcpy_unalign_copy1byte:
strb r2, [r0] strb r2, [r0]
m_pop m_pop
.L_strcpy_unalign_copy2bytes: strcpy_unalign_copy2bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r4, [r0] strb r4, [r0]
m_pop m_pop
.p2align 2 .p2align 2
.L_strcat_mainloop: strcat_mainloop:
ldrd r2, r3, [r0], #8 ldrd r2, r3, [r0], #8
pld [r0, #64] pld [r0, #64]
@ -517,59 +510,59 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcat_zero_in_first_register bne strcat_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_strcat_zero_in_second_register bne strcat_zero_in_second_register
b .L_strcat_mainloop b strcat_mainloop
.L_strcat_zero_in_first_register: strcat_zero_in_first_register:
// Prefetch the src now, it's going to be used soon. // Prefetch the src now, it's going to be used soon.
pld [r1, #0] pld [r1, #0]
lsls lr, ip, #17 lsls lr, ip, #17
bne .L_strcat_sub8 bne strcat_sub8
bcs .L_strcat_sub7 bcs strcat_sub7
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_strcat_sub6 bne strcat_sub6
sub r0, r0, #5 sub r0, r0, #5
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub8: strcat_sub8:
sub r0, r0, #8 sub r0, r0, #8
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub7: strcat_sub7:
sub r0, r0, #7 sub r0, r0, #7
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub6: strcat_sub6:
sub r0, r0, #6 sub r0, r0, #6
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_zero_in_second_register: strcat_zero_in_second_register:
// Prefetch the src now, it's going to be used soon. // Prefetch the src now, it's going to be used soon.
pld [r1, #0] pld [r1, #0]
lsls lr, ip, #17 lsls lr, ip, #17
bne .L_strcat_sub4 bne strcat_sub4
bcs .L_strcat_sub3 bcs strcat_sub3
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_strcat_sub2 bne strcat_sub2
sub r0, r0, #1 sub r0, r0, #1
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub4: strcat_sub4:
sub r0, r0, #4 sub r0, r0, #4
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub3: strcat_sub3:
sub r0, r0, #3 sub r0, r0, #3
b .L_strcat_r0_scan_done b strcat_r0_scan_done
.L_strcat_sub2: strcat_sub2:
sub r0, r0, #2 sub r0, r0, #2
b .L_strcat_r0_scan_done b strcat_r0_scan_done
END(strcat) END(strcat)

View File

@ -149,20 +149,13 @@ ENTRY(strcpy)
.Lstringcopy_align_to_64: .Lstringcopy_align_to_64:
tst r3, #4 tst r3, #4
beq .Lstringcopy_check_src_align beq .Lstringcopy_check_src_align
// Read one byte at a time since we don't have any idea about the alignment ldr r2, [r1], #4
// of the source and we don't want to read into a different page.
ldrb r2, [r1], #1 sub ip, r2, #0x01010101
strb r2, [r0], #1 bic ip, ip, r2
cbz r2, .Lstringcopy_complete ands ip, ip, #0x80808080
ldrb r2, [r1], #1 bne .Lstringcopy_zero_in_first_register
strb r2, [r0], #1 str r2, [r0], #4
cbz r2, .Lstringcopy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
.Lstringcopy_check_src_align: .Lstringcopy_check_src_align:
// At this point dst is aligned to a double word, check if src // At this point dst is aligned to a double word, check if src

View File

@ -65,38 +65,38 @@ ENTRY(strlen)
mov r1, r0 mov r1, r0
ands r3, r0, #7 ands r3, r0, #7
beq .L_mainloop beq mainloop
// Align to a double word (64 bits). // Align to a double word (64 bits).
rsb r3, r3, #8 rsb r3, r3, #8
lsls ip, r3, #31 lsls ip, r3, #31
beq .L_align_to_32 beq align_to_32
ldrb r2, [r1], #1 ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return cbz r2, update_count_and_return
.L_align_to_32: align_to_32:
bcc .L_align_to_64 bcc align_to_64
ands ip, r3, #2 ands ip, r3, #2
beq .L_align_to_64 beq align_to_64
ldrb r2, [r1], #1 ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return cbz r2, update_count_and_return
ldrb r2, [r1], #1 ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return cbz r2, update_count_and_return
.L_align_to_64: align_to_64:
tst r3, #4 tst r3, #4
beq .L_mainloop beq mainloop
ldr r3, [r1], #4 ldr r3, [r1], #4
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_zero_in_second_register bne zero_in_second_register
.p2align 2 .p2align 2
.L_mainloop: mainloop:
ldrd r2, r3, [r1], #8 ldrd r2, r3, [r1], #8
pld [r1, #64] pld [r1, #64]
@ -104,62 +104,62 @@ ENTRY(strlen)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_zero_in_first_register bne zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .L_zero_in_second_register bne zero_in_second_register
b .L_mainloop b mainloop
.L_update_count_and_return: update_count_and_return:
sub r0, r1, r0 sub r0, r1, r0
sub r0, r0, #1 sub r0, r0, #1
bx lr bx lr
.L_zero_in_first_register: zero_in_first_register:
sub r0, r1, r0 sub r0, r1, r0
lsls r3, ip, #17 lsls r3, ip, #17
bne .L_sub8_and_return bne sub8_and_return
bcs .L_sub7_and_return bcs sub7_and_return
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_sub6_and_return bne sub6_and_return
sub r0, r0, #5 sub r0, r0, #5
bx lr bx lr
.L_sub8_and_return: sub8_and_return:
sub r0, r0, #8 sub r0, r0, #8
bx lr bx lr
.L_sub7_and_return: sub7_and_return:
sub r0, r0, #7 sub r0, r0, #7
bx lr bx lr
.L_sub6_and_return: sub6_and_return:
sub r0, r0, #6 sub r0, r0, #6
bx lr bx lr
.L_zero_in_second_register: zero_in_second_register:
sub r0, r1, r0 sub r0, r1, r0
lsls r3, ip, #17 lsls r3, ip, #17
bne .L_sub4_and_return bne sub4_and_return
bcs .L_sub3_and_return bcs sub3_and_return
lsls ip, ip, #1 lsls ip, ip, #1
bne .L_sub2_and_return bne sub2_and_return
sub r0, r0, #1 sub r0, r0, #1
bx lr bx lr
.L_sub4_and_return: sub4_and_return:
sub r0, r0, #4 sub r0, r0, #4
bx lr bx lr
.L_sub3_and_return: sub3_and_return:
sub r0, r0, #3 sub r0, r0, #3
bx lr bx lr
.L_sub2_and_return: sub2_and_return:
sub r0, r0, #2 sub r0, r0, #2
bx lr bx lr
END(strlen) END(strlen)

View File

@ -1,17 +1,3 @@
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/memcpy.S \ arch-arm/cortex-a15/bionic/memcpy.S \
arch-arm/cortex-a15/bionic/memset.S \ arch-arm/cortex-a15/bionic/memset.S \
@ -23,5 +9,8 @@ libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/__strcpy_chk.S \ arch-arm/cortex-a15/bionic/__strcpy_chk.S \
arch-arm/cortex-a15/bionic/strlen.S \ arch-arm/cortex-a15/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \ arch-arm/denver/bionic/memmove.S \

View File

@ -1,32 +0,0 @@
# This file represents the best optimized routines that are the middle
# ground when running on a big/little system that is cortex-a57/cortex-a53.
# The cortex-a7 optimized routines, and the cortex-a53 optimized routines
# decrease performance on cortex-a57 processors by as much as 20%.
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/memcpy.S \
arch-arm/cortex-a15/bionic/memset.S \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/__strcat_chk.S \
arch-arm/cortex-a15/bionic/strcmp.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/__strcpy_chk.S \
arch-arm/cortex-a15/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \

View File

@ -1,32 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// Indicate which memcpy base file to include.
#define MEMCPY_BASE "arch-arm/cortex-a53/bionic/memcpy_base.S"
#include "arch-arm/cortex-a15/bionic/__strcat_chk_common.S"

View File

@ -1,143 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.L_memcpy_base:
// Assumes that n >= 0, and dst, src are valid pointers.
cmp r2, #16
blo .L_copy_less_than_16_unknown_align
.L_copy_unknown_alignment:
// Unknown alignment of src and dst.
// Assumes that the first few bytes have already been prefetched.
// Align destination to 128 bits. The mainloop store instructions
// require this alignment or they will throw an exception.
rsb r3, r0, #0
ands r3, r3, #0xF
beq 2f
// Copy up to 15 bytes (count in r3).
sub r2, r2, r3
movs ip, r3, lsl #31
itt mi
ldrbmi lr, [r1], #1
strbmi lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1], #1
strbcs ip, [r0], #1
strbcs lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// Copies 4 bytes, dst 32 bits aligned before, at least 64 bits after.
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// Copies 8 bytes, dst 64 bits aligned before, at least 128 bits after.
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2: // Make sure we have at least 64 bytes to copy.
subs r2, r2, #64
blo 2f
1: // The main loop copies 64 bytes at a time.
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
subs r2, r2, #64
vstmia r0!, {d0 - d7}
pld [r1, #(64*10)]
bhs 1b
2: // Fix-up the remaining count and make sure we have >= 32 bytes left.
adds r2, r2, #32
blo 3f
// 32 bytes. These cache lines were already preloaded.
vld1.8 {d0 - d3}, [r1]!
sub r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
3: // Less than 32 left.
add r2, r2, #32
tst r2, #0x10
beq .L_copy_less_than_16_unknown_align
// Copies 16 bytes, destination 128 bits aligned.
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
.L_copy_less_than_16_unknown_align:
// Copy up to 15 bytes (count in r2).
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: // Copy 0 to 4 bytes.
lsls r2, r2, #31
itt ne
ldrbne lr, [r1], #1
strbne lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1]
strbcs ip, [r0], #1
strbcs lr, [r0]
pop {r0, pc}

View File

@ -1,31 +0,0 @@
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \
arch-arm/cortex-a53/bionic/memcpy.S \
arch-arm/cortex-a53/bionic/__strcat_chk.S \
arch-arm/cortex-a53/bionic/__strcpy_chk.S \
libc_bionic_src_files_arm += \
arch-arm/cortex-a7/bionic/memset.S \
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/strcmp.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \

View File

@ -1,180 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <private/bionic_asm.h>
#include <private/libc_events.h>
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
.fpu neon
.syntax unified
ENTRY(__memset_chk)
cmp r2, r3
bls .L_done
// Preserve lr for backtrace.
push {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
ldr r0, error_message
ldr r1, error_code
1:
add r0, pc
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+8)
END(__memset_chk)
ENTRY(bzero)
mov r2, r1
mov r1, #0
.L_done:
// Fall through to memset...
END(bzero)
ENTRY(memset)
mov r3, r0
// At this point only d0, d1 are going to be used below.
vdup.8 q0, r1
cmp r2, #16
blo .L_set_less_than_16_unknown_align
.L_check_alignment:
// Align destination to a double word to avoid the store crossing
// a cache line boundary.
ands ip, r3, #7
bne .L_do_double_word_align
.L_double_word_aligned:
// Duplicate since the less than 64 can use d2, d3.
vmov q1, q0
subs r2, #64
blo .L_set_less_than_64
// Duplicate the copy value so that we can store 64 bytes at a time.
vmov q2, q0
vmov q3, q0
1: // Main loop stores 64 bytes at a time.
subs r2, #64
vstmia r3!, {d0 - d7}
bge 1b
.L_set_less_than_64:
// Restore r2 to the count of bytes left to set.
add r2, #64
lsls ip, r2, #27
bcc .L_set_less_than_32
// Set 32 bytes.
vstmia r3!, {d0 - d3}
.L_set_less_than_32:
bpl .L_set_less_than_16
// Set 16 bytes.
vstmia r3!, {d0, d1}
.L_set_less_than_16:
// Less than 16 bytes to set.
lsls ip, r2, #29
bcc .L_set_less_than_8
// Set 8 bytes.
vstmia r3!, {d0}
.L_set_less_than_8:
bpl .L_set_less_than_4
// Set 4 bytes
vst1.32 {d0[0]}, [r3]!
.L_set_less_than_4:
lsls ip, r2, #31
it ne
strbne r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3]
bx lr
.L_do_double_word_align:
rsb ip, ip, #8
sub r2, r2, ip
// Do this comparison now, otherwise we'll need to save a
// register to the stack since we've used all available
// registers.
cmp ip, #4
blo 1f
// Need to do a four byte copy.
movs ip, ip, lsl #31
it mi
strbmi r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3], #1
vst1.32 {d0[0]}, [r3]!
b .L_double_word_aligned
1:
// No four byte copy.
movs ip, ip, lsl #31
it mi
strbmi r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3], #1
b .L_double_word_aligned
.L_set_less_than_16_unknown_align:
// Set up to 15 bytes.
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r3]!
1: bge 2f
vst1.32 {d0[0]}, [r3]!
2: movs ip, r2, lsl #31
it mi
strbmi r1, [r3], #1
itt cs
strbcs r1, [r3], #1
strbcs r1, [r3], #1
bx lr
END(memset)
.data
error_string:
.string "memset: prevented write past end of buffer"

View File

@ -1,29 +1 @@
libc_openbsd_src_files_exclude_arm += \ include bionic/libc/arch-arm/cortex-a15/cortex-a15.mk
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \
arch-arm/cortex-a7/bionic/memset.S \
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/memcpy.S \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/__strcat_chk.S \
arch-arm/cortex-a15/bionic/strcmp.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/__strcpy_chk.S \
arch-arm/cortex-a15/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \

View File

@ -133,7 +133,8 @@ ENTRY_PRIVATE(MEMCPY_BASE)
strbcs ip, [r0], #1 strbcs ip, [r0], #1
strbcs lr, [r0], #1 strbcs lr, [r0], #1
ldmfd sp!, {r0, pc} ldmfd sp!, {r0, lr}
bx lr
END(MEMCPY_BASE) END(MEMCPY_BASE)
ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED) ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)

View File

@ -35,7 +35,6 @@
*/ */
.fpu neon .fpu neon
.syntax unified
ENTRY(__memset_chk) ENTRY(__memset_chk)
cmp r2, r3 cmp r2, r3
@ -69,9 +68,12 @@ END(bzero)
ENTRY(memset) ENTRY(memset)
// The neon memset only wins for less than 132. // The neon memset only wins for less than 132.
cmp r2, #132 cmp r2, #132
bhi .L_memset_large_copy bhi __memset_large_copy
stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0
mov r3, r0
vdup.8 q0, r1 vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */ /* make sure we have at least 32 bytes to write */
@ -81,7 +83,7 @@ ENTRY(memset)
1: /* The main loop writes 32 bytes at a time */ 1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32 subs r2, r2, #32
vst1.8 {d0 - d3}, [r3]! vst1.8 {d0 - d3}, [r0]!
bhs 1b bhs 1b
2: /* less than 32 left */ 2: /* less than 32 left */
@ -90,20 +92,22 @@ ENTRY(memset)
beq 3f beq 3f
// writes 16 bytes, 128-bits aligned // writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r3]! vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */ 3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29 movs ip, r2, lsl #29
bcc 1f bcc 1f
vst1.8 {d0}, [r3]! vst1.8 {d0}, [r0]!
1: bge 2f 1: bge 2f
vst1.32 {d0[0]}, [r3]! vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31 2: movs ip, r2, lsl #31
strbmi r1, [r3], #1 strmib r1, [r0], #1
strbcs r1, [r3], #1 strcsb r1, [r0], #1
strbcs r1, [r3], #1 strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr bx lr
END(memset)
.L_memset_large_copy: ENTRY_PRIVATE(__memset_large_copy)
/* compute the offset to align the destination /* compute the offset to align the destination
* offset = (4-(src&3))&3 = -src & 3 * offset = (4-(src&3))&3 = -src & 3
*/ */
@ -127,11 +131,12 @@ ENTRY(memset)
orr r1, r1, r1, lsr #16 orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31 movs r12, r3, lsl #31
strbcs r1, [r0], #1 /* can't use strh (alignment unknown) */ strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strbcs r1, [r0], #1 strcsb r1, [r0], #1
strbmi r1, [r0], #1 strmib r1, [r0], #1
subs r2, r2, r3 subs r2, r2, r3
popls {r0, r4-r7, pc} /* return */ ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */ /* align the destination to a cache-line */
mov r12, r1 mov r12, r1
@ -150,9 +155,9 @@ ENTRY(memset)
/* conditionally writes 0 to 7 words (length in r3) */ /* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28 movs r3, r3, lsl #28
stmcs r0!, {r1, lr} stmcsia r0!, {r1, lr}
stmcs r0!, {r1, lr} stmcsia r0!, {r1, lr}
stmmi r0!, {r1, lr} stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2 movs r3, r3, lsl #2
strcs r1, [r0], #4 strcs r1, [r0], #4
@ -167,15 +172,16 @@ ENTRY(memset)
/* conditionally stores 0 to 31 bytes */ /* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28 movs r2, r2, lsl #28
stmcs r0!, {r1,r3,r12,lr} stmcsia r0!, {r1,r3,r12,lr}
stmmi r0!, {r1, lr} stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2 movs r2, r2, lsl #2
strcs r1, [r0], #4 strcs r1, [r0], #4
strhmi r1, [r0], #2 strmih r1, [r0], #2
movs r2, r2, lsl #2 movs r2, r2, lsl #2
strbcs r1, [r0] strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, pc} ldmfd sp!, {r0, r4-r7, lr}
END(memset) bx lr
END(__memset_large_copy)
.data .data
error_string: error_string:

View File

@ -70,7 +70,7 @@
.macro m_scan_byte .macro m_scan_byte
ldrb r3, [r0] ldrb r3, [r0]
cbz r3, .Lstrcat_r0_scan_done cbz r3, strcat_r0_scan_done
add r0, #1 add r0, #1
.endm // m_scan_byte .endm // m_scan_byte
@ -84,10 +84,10 @@ ENTRY(strcat)
// Quick check to see if src is empty. // Quick check to see if src is empty.
ldrb r2, [r1] ldrb r2, [r1]
pld [r1, #0] pld [r1, #0]
cbnz r2, .Lstrcat_continue cbnz r2, strcat_continue
bx lr bx lr
.Lstrcat_continue: strcat_continue:
// To speed up really small dst strings, unroll checking the first 4 bytes. // To speed up really small dst strings, unroll checking the first 4 bytes.
m_push m_push
m_scan_byte m_scan_byte
@ -96,10 +96,10 @@ ENTRY(strcat)
m_scan_byte m_scan_byte
ands r3, r0, #7 ands r3, r0, #7
bne .Lstrcat_align_src bne strcat_align_src
.p2align 2 .p2align 2
.Lstrcat_mainloop: strcat_mainloop:
ldmia r0!, {r2, r3} ldmia r0!, {r2, r3}
pld [r0, #64] pld [r0, #64]
@ -107,28 +107,28 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcat_zero_in_first_register bne strcat_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcat_zero_in_second_register bne strcat_zero_in_second_register
b .Lstrcat_mainloop b strcat_mainloop
.Lstrcat_zero_in_first_register: strcat_zero_in_first_register:
sub r0, r0, #4 sub r0, r0, #4
.Lstrcat_zero_in_second_register: strcat_zero_in_second_register:
// Check for zero in byte 0. // Check for zero in byte 0.
tst ip, #0x80 tst ip, #0x80
it ne it ne
subne r0, r0, #4 subne r0, r0, #4
bne .Lstrcat_r0_scan_done bne strcat_r0_scan_done
// Check for zero in byte 1. // Check for zero in byte 1.
tst ip, #0x8000 tst ip, #0x8000
it ne it ne
subne r0, r0, #3 subne r0, r0, #3
bne .Lstrcat_r0_scan_done bne strcat_r0_scan_done
// Check for zero in byte 2. // Check for zero in byte 2.
tst ip, #0x800000 tst ip, #0x800000
it ne it ne
@ -137,33 +137,33 @@ ENTRY(strcat)
// Zero is in byte 3. // Zero is in byte 3.
subeq r0, r0, #1 subeq r0, r0, #1
.Lstrcat_r0_scan_done: strcat_r0_scan_done:
// Unroll the first 8 bytes that will be copied. // Unroll the first 8 bytes that will be copied.
m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=.Lstrcpy_continue m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
.Lstrcpy_finish: strcpy_finish:
m_ret inst=pop m_ret inst=pop
.Lstrcpy_continue: strcpy_continue:
pld [r1, #0] pld [r1, #0]
ands r3, r0, #7 ands r3, r0, #7
bne .Lstrcpy_align_dst bne strcpy_align_dst
.Lstrcpy_check_src_align: strcpy_check_src_align:
// At this point dst is aligned to a double word, check if src // At this point dst is aligned to a double word, check if src
// is also aligned to a double word. // is also aligned to a double word.
ands r3, r1, #7 ands r3, r1, #7
bne .Lstrcpy_unaligned_copy bne strcpy_unaligned_copy
.p2align 2 .p2align 2
.Lstrcpy_mainloop: strcpy_mainloop:
ldmia r1!, {r2, r3} ldmia r1!, {r2, r3}
pld [r1, #64] pld [r1, #64]
@ -171,17 +171,17 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_mainloop b strcpy_mainloop
.Lstrcpy_zero_in_first_register: strcpy_zero_in_first_register:
lsls lr, ip, #17 lsls lr, ip, #17
itt ne itt ne
strbne r2, [r0] strbne r2, [r0]
@ -198,7 +198,7 @@ ENTRY(strcat)
strb r3, [r0] strb r3, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_zero_in_second_register: strcpy_zero_in_second_register:
lsls lr, ip, #17 lsls lr, ip, #17
ittt ne ittt ne
stmiane r0!, {r2} stmiane r0!, {r2}
@ -218,18 +218,18 @@ ENTRY(strcat)
strb r4, [r0] strb r4, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_align_dst: strcpy_align_dst:
// Align to a double word (64 bits). // Align to a double word (64 bits).
rsb r3, r3, #8 rsb r3, r3, #8
lsls ip, r3, #31 lsls ip, r3, #31
beq .Lstrcpy_align_to_32 beq strcpy_align_to_32
ldrb r2, [r1], #1 ldrb r2, [r1], #1
strb r2, [r0], #1 strb r2, [r0], #1
cbz r2, .Lstrcpy_complete cbz r2, strcpy_complete
.Lstrcpy_align_to_32: strcpy_align_to_32:
bcc .Lstrcpy_align_to_64 bcc strcpy_align_to_64
ldrb r4, [r1], #1 ldrb r4, [r1], #1
strb r4, [r0], #1 strb r4, [r0], #1
@ -242,83 +242,76 @@ ENTRY(strcat)
it eq it eq
m_ret inst=popeq m_ret inst=popeq
.Lstrcpy_align_to_64: strcpy_align_to_64:
tst r3, #4 tst r3, #4
beq .Lstrcpy_check_src_align beq strcpy_check_src_align
// Read one byte at a time since we don't know the src alignment ldr r2, [r1], #4
// and we don't want to read into a different page.
ldrb r4, [r1], #1
strb r4, [r0], #1
cbz r4, .Lstrcpy_complete
ldrb r5, [r1], #1
strb r5, [r0], #1
cbz r5, .Lstrcpy_complete
ldrb r4, [r1], #1
strb r4, [r0], #1
cbz r4, .Lstrcpy_complete
ldrb r5, [r1], #1
strb r5, [r0], #1
cbz r5, .Lstrcpy_complete
b .Lstrcpy_check_src_align
.Lstrcpy_complete: sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
stmia r0!, {r2}
b strcpy_check_src_align
strcpy_complete:
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unaligned_copy: strcpy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment. // Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code // There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version // to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can // will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary. // be read without potentially crossing a page boundary.
tbb [pc, r3] tbb [pc, r3]
.Lstrcpy_unaligned_branchtable: strcpy_unaligned_branchtable:
.byte 0 .byte 0
.byte ((.Lstrcpy_unalign7 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign6 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign5 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign4 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign3 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign2 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
.byte ((.Lstrcpy_unalign1 - .Lstrcpy_unaligned_branchtable)/2) .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
.p2align 2 .p2align 2
// Can read 7 bytes before possibly crossing a page. // Can read 7 bytes before possibly crossing a page.
.Lstrcpy_unalign7: strcpy_unalign7:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r3, [r1] ldrb r3, [r1]
cbz r3, .Lstrcpy_unalign7_copy5bytes cbz r3, strcpy_unalign7_copy5bytes
ldrb r4, [r1, #1] ldrb r4, [r1, #1]
cbz r4, .Lstrcpy_unalign7_copy6bytes cbz r4, strcpy_unalign7_copy6bytes
ldrb r5, [r1, #2] ldrb r5, [r1, #2]
cbz r5, .Lstrcpy_unalign7_copy7bytes cbz r5, strcpy_unalign7_copy7bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
lsrs ip, r3, #24 lsrs ip, r3, #24
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
beq .Lstrcpy_unalign_return beq strcpy_unalign_return
b .Lstrcpy_unalign7 b strcpy_unalign7
.Lstrcpy_unalign7_copy5bytes: strcpy_unalign7_copy5bytes:
stmia r0!, {r2} stmia r0!, {r2}
strb r3, [r0] strb r3, [r0]
.Lstrcpy_unalign_return: strcpy_unalign_return:
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign7_copy6bytes: strcpy_unalign7_copy6bytes:
stmia r0!, {r2} stmia r0!, {r2}
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0], #1 strb r4, [r0], #1
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign7_copy7bytes: strcpy_unalign7_copy7bytes:
stmia r0!, {r2} stmia r0!, {r2}
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0], #1 strb r4, [r0], #1
@ -327,30 +320,30 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 6 bytes before possibly crossing a page. // Can read 6 bytes before possibly crossing a page.
.Lstrcpy_unalign6: strcpy_unalign6:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r4, [r1] ldrb r4, [r1]
cbz r4, .Lstrcpy_unalign_copy5bytes cbz r4, strcpy_unalign_copy5bytes
ldrb r5, [r1, #1] ldrb r5, [r1, #1]
cbz r5, .Lstrcpy_unalign_copy6bytes cbz r5, strcpy_unalign_copy6bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
tst r3, #0xff0000 tst r3, #0xff0000
beq .Lstrcpy_unalign6_copy7bytes beq strcpy_unalign6_copy7bytes
lsrs ip, r3, #24 lsrs ip, r3, #24
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
beq .Lstrcpy_unalign_return beq strcpy_unalign_return
b .Lstrcpy_unalign6 b strcpy_unalign6
.Lstrcpy_unalign6_copy7bytes: strcpy_unalign6_copy7bytes:
stmia r0!, {r2} stmia r0!, {r2}
strh r3, [r0], #2 strh r3, [r0], #2
lsr r3, #16 lsr r3, #16
@ -359,16 +352,16 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 5 bytes before possibly crossing a page. // Can read 5 bytes before possibly crossing a page.
.Lstrcpy_unalign5: strcpy_unalign5:
ldr r2, [r1], #4 ldr r2, [r1], #4
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldrb r4, [r1] ldrb r4, [r1]
cbz r4, .Lstrcpy_unalign_copy5bytes cbz r4, strcpy_unalign_copy5bytes
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -377,17 +370,17 @@ ENTRY(strcat)
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_unalign5 b strcpy_unalign5
.Lstrcpy_unalign_copy5bytes: strcpy_unalign_copy5bytes:
stmia r0!, {r2} stmia r0!, {r2}
strb r4, [r0] strb r4, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign_copy6bytes: strcpy_unalign_copy6bytes:
stmia r0!, {r2} stmia r0!, {r2}
strb r4, [r0], #1 strb r4, [r0], #1
strb r5, [r0] strb r5, [r0]
@ -395,13 +388,13 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 4 bytes before possibly crossing a page. // Can read 4 bytes before possibly crossing a page.
.Lstrcpy_unalign4: strcpy_unalign4:
ldmia r1!, {r2} ldmia r1!, {r2}
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
ldmia r1!, {r3} ldmia r1!, {r3}
pld [r1, #64] pld [r1, #64]
@ -409,20 +402,20 @@ ENTRY(strcat)
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_unalign4 b strcpy_unalign4
.p2align 2 .p2align 2
// Can read 3 bytes before possibly crossing a page. // Can read 3 bytes before possibly crossing a page.
.Lstrcpy_unalign3: strcpy_unalign3:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .Lstrcpy_unalign3_copy1byte cbz r2, strcpy_unalign3_copy1byte
ldrb r3, [r1, #1] ldrb r3, [r1, #1]
cbz r3, .Lstrcpy_unalign3_copy2bytes cbz r3, strcpy_unalign3_copy2bytes
ldrb r4, [r1, #2] ldrb r4, [r1, #2]
cbz r4, .Lstrcpy_unalign3_copy3bytes cbz r4, strcpy_unalign3_copy3bytes
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -430,26 +423,26 @@ ENTRY(strcat)
pld [r1, #64] pld [r1, #64]
lsrs lr, r2, #24 lsrs lr, r2, #24
beq .Lstrcpy_unalign_copy4bytes beq strcpy_unalign_copy4bytes
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_unalign3 b strcpy_unalign3
.Lstrcpy_unalign3_copy1byte: strcpy_unalign3_copy1byte:
strb r2, [r0] strb r2, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign3_copy2bytes: strcpy_unalign3_copy2bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r3, [r0] strb r3, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign3_copy3bytes: strcpy_unalign3_copy3bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r3, [r0], #1 strb r3, [r0], #1
strb r4, [r0] strb r4, [r0]
@ -457,34 +450,34 @@ ENTRY(strcat)
.p2align 2 .p2align 2
// Can read 2 bytes before possibly crossing a page. // Can read 2 bytes before possibly crossing a page.
.Lstrcpy_unalign2: strcpy_unalign2:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .Lstrcpy_unalign_copy1byte cbz r2, strcpy_unalign_copy1byte
ldrb r3, [r1, #1] ldrb r3, [r1, #1]
cbz r3, .Lstrcpy_unalign_copy2bytes cbz r3, strcpy_unalign_copy2bytes
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
pld [r1, #64] pld [r1, #64]
tst r2, #0xff0000 tst r2, #0xff0000
beq .Lstrcpy_unalign_copy3bytes beq strcpy_unalign_copy3bytes
lsrs ip, r2, #24 lsrs ip, r2, #24
beq .Lstrcpy_unalign_copy4bytes beq strcpy_unalign_copy4bytes
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_unalign2 b strcpy_unalign2
.p2align 2 .p2align 2
// Can read 1 byte before possibly crossing a page. // Can read 1 byte before possibly crossing a page.
.Lstrcpy_unalign1: strcpy_unalign1:
ldrb r2, [r1] ldrb r2, [r1]
cbz r2, .Lstrcpy_unalign_copy1byte cbz r2, strcpy_unalign_copy1byte
ldr r2, [r1], #4 ldr r2, [r1], #4
ldr r3, [r1], #4 ldr r3, [r1], #4
@ -494,62 +487,62 @@ ENTRY(strcat)
sub ip, r2, #0x01010101 sub ip, r2, #0x01010101
bic ip, ip, r2 bic ip, ip, r2
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_first_register bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcpy_zero_in_second_register bne strcpy_zero_in_second_register
stmia r0!, {r2, r3} stmia r0!, {r2, r3}
b .Lstrcpy_unalign1 b strcpy_unalign1
.Lstrcpy_unalign_copy1byte: strcpy_unalign_copy1byte:
strb r2, [r0] strb r2, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign_copy2bytes: strcpy_unalign_copy2bytes:
strb r2, [r0], #1 strb r2, [r0], #1
strb r3, [r0] strb r3, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign_copy3bytes: strcpy_unalign_copy3bytes:
strh r2, [r0], #2 strh r2, [r0], #2
lsr r2, #16 lsr r2, #16
strb r2, [r0] strb r2, [r0]
m_ret inst=pop m_ret inst=pop
.Lstrcpy_unalign_copy4bytes: strcpy_unalign_copy4bytes:
stmia r0, {r2} stmia r0, {r2}
m_ret inst=pop m_ret inst=pop
.Lstrcat_align_src: strcat_align_src:
// Align to a double word (64 bits). // Align to a double word (64 bits).
rsb r3, r3, #8 rsb r3, r3, #8
lsls ip, r3, #31 lsls ip, r3, #31
beq .Lstrcat_align_to_32 beq strcat_align_to_32
ldrb r2, [r0], #1 ldrb r2, [r0], #1
cbz r2, .Lstrcat_r0_update cbz r2, strcat_r0_update
.Lstrcat_align_to_32: strcat_align_to_32:
bcc .Lstrcat_align_to_64 bcc strcat_align_to_64
ldrb r2, [r0], #1 ldrb r2, [r0], #1
cbz r2, .Lstrcat_r0_update cbz r2, strcat_r0_update
ldrb r2, [r0], #1 ldrb r2, [r0], #1
cbz r2, .Lstrcat_r0_update cbz r2, strcat_r0_update
.Lstrcat_align_to_64: strcat_align_to_64:
tst r3, #4 tst r3, #4
beq .Lstrcat_mainloop beq strcat_mainloop
ldr r3, [r0], #4 ldr r3, [r0], #4
sub ip, r3, #0x01010101 sub ip, r3, #0x01010101
bic ip, ip, r3 bic ip, ip, r3
ands ip, ip, #0x80808080 ands ip, ip, #0x80808080
bne .Lstrcat_zero_in_second_register bne strcat_zero_in_second_register
b .Lstrcat_mainloop b strcat_mainloop
.Lstrcat_r0_update: strcat_r0_update:
sub r0, r0, #1 sub r0, r0, #1
b .Lstrcat_r0_scan_done b strcat_r0_scan_done
END(strcat) END(strcat)

View File

@ -244,20 +244,13 @@ ENTRY(strcpy)
.Lstringcopy_align_to_64: .Lstringcopy_align_to_64:
tst r3, #4 tst r3, #4
beq .Lstringcopy_check_src_align beq .Lstringcopy_check_src_align
// Read one byte at a time since we don't have any idea about the alignment ldr r2, [r1], #4
// of the source and we don't want to read into a different page.
ldrb r2, [r1], #1 sub ip, r2, #0x01010101
strb r2, [r0], #1 bic ip, ip, r2
cbz r2, .Lstringcopy_complete ands ip, ip, #0x80808080
ldrb r2, [r1], #1 bne .Lstringcopy_zero_in_first_register
strb r2, [r0], #1 stmia r0!, {r2}
cbz r2, .Lstringcopy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
b .Lstringcopy_check_src_align b .Lstringcopy_check_src_align
.Lstringcopy_complete: .Lstringcopy_complete:

View File

@ -1,18 +1,3 @@
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strcpy.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/cortex-a9/bionic/memcpy.S \ arch-arm/cortex-a9/bionic/memcpy.S \
arch-arm/cortex-a9/bionic/memset.S \ arch-arm/cortex-a9/bionic/memset.S \
@ -24,5 +9,8 @@ libc_bionic_src_files_arm += \
arch-arm/cortex-a9/bionic/__strcpy_chk.S \ arch-arm/cortex-a9/bionic/__strcpy_chk.S \
arch-arm/cortex-a9/bionic/strlen.S \ arch-arm/cortex-a9/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \ arch-arm/denver/bionic/memmove.S \

View File

@ -1,18 +1,5 @@
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \
arch-arm/denver/bionic/memcpy.S \ arch-arm/denver/bionic/memcpy.S \
arch-arm/denver/bionic/memmove.S \ arch-arm/denver/bionic/memmove.S \
arch-arm/denver/bionic/memset.S \ arch-arm/denver/bionic/memset.S \

View File

@ -40,8 +40,6 @@
* Optimized memcmp() for Cortex-A9. * Optimized memcmp() for Cortex-A9.
*/ */
.syntax unified
ENTRY(memcmp) ENTRY(memcmp)
pld [r0, #(CACHE_LINE_SIZE * 0)] pld [r0, #(CACHE_LINE_SIZE * 0)]
pld [r0, #(CACHE_LINE_SIZE * 1)] pld [r0, #(CACHE_LINE_SIZE * 1)]
@ -163,25 +161,25 @@ ENTRY(memcmp)
eors r0, r0, ip eors r0, r0, ip
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq ip, [r1, #4]! ldreq ip, [r1, #4]!
eorseq r0, r0, lr eoreqs r0, r0, lr
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq lr, [r1, #4]! ldreq lr, [r1, #4]!
eorseq r0, r0, ip eoreqs r0, r0, ip
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq ip, [r1, #4]! ldreq ip, [r1, #4]!
eorseq r0, r0, lr eoreqs r0, r0, lr
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq lr, [r1, #4]! ldreq lr, [r1, #4]!
eorseq r0, r0, ip eoreqs r0, r0, ip
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq ip, [r1, #4]! ldreq ip, [r1, #4]!
eorseq r0, r0, lr eoreqs r0, r0, lr
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq lr, [r1, #4]! ldreq lr, [r1, #4]!
eorseq r0, r0, ip eoreqs r0, r0, ip
ldreq r0, [r4], #4 ldreq r0, [r4], #4
ldreq ip, [r1, #4]! ldreq ip, [r1, #4]!
eorseq r0, r0, lr eoreqs r0, r0, lr
bne 2f bne 2f
subs r2, r2, #32 subs r2, r2, #32
bhs 0b bhs 0b
@ -221,7 +219,8 @@ ENTRY(memcmp)
bne 8b bne 8b
9: /* restore registers and return */ 9: /* restore registers and return */
ldmfd sp!, {r4, pc} ldmfd sp!, {r4, lr}
bx lr
10: /* process less than 12 bytes */ 10: /* process less than 12 bytes */
cmp r2, #0 cmp r2, #0
@ -264,17 +263,17 @@ ENTRY(memcmp)
ldreq lr, [r1], #4 ldreq lr, [r1], #4
ldreq r0, [r4], #4 ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16 orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip eoreqs r0, r0, ip
moveq ip, lr, lsr #16 moveq ip, lr, lsr #16
ldreq lr, [r1], #4 ldreq lr, [r1], #4
ldreq r0, [r4], #4 ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16 orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip eoreqs r0, r0, ip
moveq ip, lr, lsr #16 moveq ip, lr, lsr #16
ldreq lr, [r1], #4 ldreq lr, [r1], #4
ldreq r0, [r4], #4 ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16 orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip eoreqs r0, r0, ip
bne 7f bne 7f
subs r2, r2, #16 subs r2, r2, #16
bhs 6b bhs 6b
@ -318,7 +317,7 @@ ENTRY(memcmp)
ldreq r7, [r1], #4 ldreq r7, [r1], #4
ldreq r0, [r4], #4 ldreq r0, [r4], #4
orreq ip, ip, r7, lsl r6 orreq ip, ip, r7, lsl r6
eorseq r0, r0, ip eoreqs r0, r0, ip
bne 7f bne 7f
subs r2, r2, #8 subs r2, r2, #8
bhs 6b bhs 6b

View File

@ -37,8 +37,6 @@
* so we have to preserve R0. * so we have to preserve R0.
*/ */
.syntax unified
ENTRY(__memcpy_chk) ENTRY(__memcpy_chk)
cmp r2, r3 cmp r2, r3
bhi __memcpy_chk_fail bhi __memcpy_chk_fail
@ -83,12 +81,12 @@ ENTRY(memcpy)
*/ */
movs r12, r3, lsl #31 movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */ sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrbmi r3, [r1], #1 ldrmib r3, [r1], #1
ldrbcs r4, [r1], #1 ldrcsb r4, [r1], #1
ldrbcs r12,[r1], #1 ldrcsb r12,[r1], #1
strbmi r3, [r0], #1 strmib r3, [r0], #1
strbcs r4, [r0], #1 strcsb r4, [r0], #1
strbcs r12,[r0], #1 strcsb r12,[r0], #1
.Lsrc_aligned: .Lsrc_aligned:
@ -111,10 +109,10 @@ ENTRY(memcpy)
/* conditionally copies 0 to 7 words (length in r3) */ /* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28 movs r12, r3, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */ ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */ ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7} stmcsia r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9} stmmiia r0!, {r8, r9}
tst r3, #0x4 tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */ ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4 strne r10,[r0], #4
@ -179,22 +177,23 @@ ENTRY(memcpy)
/* conditionnaly copies 0 to 31 bytes */ /* conditionnaly copies 0 to 31 bytes */
movs r12, r2, lsl #28 movs r12, r2, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */ ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */ ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7} stmcsia r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9} stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30 movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */ ldrcs r3, [r1], #4 /* 4 bytes */
ldrhmi r4, [r1], #2 /* 2 bytes */ ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4 strcs r3, [r0], #4
strhmi r4, [r0], #2 strmih r4, [r0], #2
tst r2, #0x1 tst r2, #0x1
ldrbne r3, [r1] /* last byte */ ldrneb r3, [r1] /* last byte */
strbne r3, [r0] strneb r3, [r0]
/* we're done! restore everything and return */ /* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11} 1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, pc} ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/ /********************************************************************/
@ -229,11 +228,11 @@ ENTRY(memcpy)
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment) * becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/ */
movs r5, r5, lsl #31 movs r5, r5, lsl #31
strbmi r3, [r0], #1 strmib r3, [r0], #1
movmi r3, r3, lsr #8 movmi r3, r3, lsr #8
strbcs r3, [r0], #1 strcsb r3, [r0], #1
movcs r3, r3, lsr #8 movcs r3, r3, lsr #8
strbcs r3, [r0], #1 strcsb r3, [r0], #1
movcs r3, r3, lsr #8 movcs r3, r3, lsr #8
cmp r2, #4 cmp r2, #4
@ -364,27 +363,28 @@ ENTRY(memcpy)
.Lpartial_word_tail: .Lpartial_word_tail:
/* we have a partial word in the input buffer */ /* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3) movs r5, lr, lsl #(31-3)
strbmi r3, [r0], #1 strmib r3, [r0], #1
movmi r3, r3, lsr #8 movmi r3, r3, lsr #8
strbcs r3, [r0], #1 strcsb r3, [r0], #1
movcs r3, r3, lsr #8 movcs r3, r3, lsr #8
strbcs r3, [r0], #1 strcsb r3, [r0], #1
/* Refill spilled registers from the stack. Don't update sp. */ /* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11} ldmfd sp, {r5-r11}
.Lcopy_last_3_and_return: .Lcopy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */ movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrbmi r2, [r1], #1 ldrmib r2, [r1], #1
ldrbcs r3, [r1], #1 ldrcsb r3, [r1], #1
ldrbcs r12,[r1] ldrcsb r12,[r1]
strbmi r2, [r0], #1 strmib r2, [r0], #1
strbcs r3, [r0], #1 strcsb r3, [r0], #1
strbcs r12,[r0] strcsb r12,[r0]
/* we're done! restore sp and spilled registers and return */ /* we're done! restore sp and spilled registers and return */
add sp, sp, #28 add sp, sp, #28
ldmfd sp!, {r0, r4, pc} ldmfd sp!, {r0, r4, lr}
bx lr
END(memcpy) END(memcpy)
// Only reached when the __memcpy_chk check fails. // Only reached when the __memcpy_chk check fails.

View File

@ -35,8 +35,6 @@
* memset() returns its first argument. * memset() returns its first argument.
*/ */
.syntax unified
ENTRY(__memset_chk) ENTRY(__memset_chk)
cmp r2, r3 cmp r2, r3
bls done bls done
@ -78,11 +76,12 @@ ENTRY(memset)
orr r1, r1, r1, lsr #16 orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31 movs r12, r3, lsl #31
strbcs r1, [r0], #1 /* can't use strh (alignment unknown) */ strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strbcs r1, [r0], #1 strcsb r1, [r0], #1
strbmi r1, [r0], #1 strmib r1, [r0], #1
subs r2, r2, r3 subs r2, r2, r3
popls {r0, r4-r7, pc} /* return */ ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */ /* align the destination to a cache-line */
mov r12, r1 mov r12, r1
@ -101,9 +100,9 @@ ENTRY(memset)
/* conditionally writes 0 to 7 words (length in r3) */ /* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28 movs r3, r3, lsl #28
stmcs r0!, {r1, lr} stmcsia r0!, {r1, lr}
stmcs r0!, {r1, lr} stmcsia r0!, {r1, lr}
stmmi r0!, {r1, lr} stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2 movs r3, r3, lsl #2
strcs r1, [r0], #4 strcs r1, [r0], #4
@ -118,14 +117,15 @@ ENTRY(memset)
/* conditionally stores 0 to 31 bytes */ /* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28 movs r2, r2, lsl #28
stmcs r0!, {r1,r3,r12,lr} stmcsia r0!, {r1,r3,r12,lr}
stmmi r0!, {r1, lr} stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2 movs r2, r2, lsl #2
strcs r1, [r0], #4 strcs r1, [r0], #4
strhmi r1, [r0], #2 strmih r1, [r0], #2
movs r2, r2, lsl #2 movs r2, r2, lsl #2
strbcs r1, [r0] strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, pc} ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset) END(memset)
.data .data

View File

@ -32,8 +32,6 @@
#include <machine/cpu-features.h> #include <machine/cpu-features.h>
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
.syntax unified
ENTRY(strcpy) ENTRY(strcpy)
pld [r1, #0] pld [r1, #0]
eor r2, r0, r1 eor r2, r0, r1
@ -110,15 +108,15 @@ ENTRY(strcpy)
#ifdef __ARMEB__ #ifdef __ARMEB__
tst r2, #0xff00 tst r2, #0xff00
iteet ne iteet ne
strhne r2, [ip], #2 strneh r2, [ip], #2
lsreq r2, r2, #8 lsreq r2, r2, #8
strbeq r2, [ip] streqb r2, [ip]
tstne r2, #0xff tstne r2, #0xff
#else #else
tst r2, #0xff tst r2, #0xff
itet ne itet ne
strhne r2, [ip], #2 strneh r2, [ip], #2
strbeq r2, [ip] streqb r2, [ip]
tstne r2, #0xff00 tstne r2, #0xff00
#endif #endif
bne 5b bne 5b

View File

@ -0,0 +1,14 @@
libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \

View File

@ -40,7 +40,7 @@
ENTRY(__strcat_chk) ENTRY(__strcat_chk)
pld [r0, #0] pld [r0, #0]
push {r0, lr} push {r0, lr}
.cfi_adjust_cfa_offset 8 .cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0 .cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4 .cfi_rel_offset lr, 4
push {r4, r5} push {r4, r5}
@ -177,7 +177,7 @@ ENTRY(__strcat_chk)
.L_strlen_done: .L_strlen_done:
add r2, r3, r4 add r2, r3, r4
cmp r2, lr cmp r2, lr
bhi .L_strcat_chk_failed bhi __strcat_chk_failed
// Set up the registers for the memcpy code. // Set up the registers for the memcpy code.
mov r1, r5 mov r1, r5
@ -185,17 +185,20 @@ ENTRY(__strcat_chk)
mov r2, r4 mov r2, r4
add r0, r0, r3 add r0, r0, r3
pop {r4, r5} pop {r4, r5}
.cfi_adjust_cfa_offset -8 END(__strcat_chk)
.cfi_restore r4
.cfi_restore r5
#define MEMCPY_BASE __strcat_chk_memcpy_base
#define MEMCPY_BASE_ALIGNED __strcat_chk_memcpy_base_aligned
#include "memcpy_base.S" #include "memcpy_base.S"
// Undo the above cfi directives. ENTRY_PRIVATE(__strcat_chk_failed)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
.cfi_adjust_cfa_offset 8 .cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0 .cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4 .cfi_rel_offset r5, 4
.L_strcat_chk_failed:
ldr r0, error_message ldr r0, error_message
ldr r1, error_code ldr r1, error_code
1: 1:
@ -205,7 +208,7 @@ error_code:
.word BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW .word BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW
error_message: error_message:
.word error_string-(1b+4) .word error_string-(1b+4)
END(__strcat_chk) END(__strcat_chk_failed)
.data .data
error_string: error_string:

View File

@ -39,7 +39,7 @@
ENTRY(__strcpy_chk) ENTRY(__strcpy_chk)
pld [r0, #0] pld [r0, #0]
push {r0, lr} push {r0, lr}
.cfi_adjust_cfa_offset 8 .cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0 .cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4 .cfi_rel_offset lr, 4
@ -149,14 +149,21 @@ ENTRY(__strcpy_chk)
pld [r1, #64] pld [r1, #64]
ldr r0, [sp] ldr r0, [sp]
cmp r3, lr cmp r3, lr
bhs .L_strcpy_chk_failed bhs __strcpy_chk_failed
// Add 1 for copy length to get the string terminator. // Add 1 for copy length to get the string terminator.
add r2, r3, #1 add r2, r3, #1
END(__strcpy_chk)
#define MEMCPY_BASE __strcpy_chk_memcpy_base
#define MEMCPY_BASE_ALIGNED __strcpy_chk_memcpy_base_aligned
#include "memcpy_base.S" #include "memcpy_base.S"
.L_strcpy_chk_failed: ENTRY_PRIVATE(__strcpy_chk_failed)
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
ldr r0, error_message ldr r0, error_message
ldr r1, error_code ldr r1, error_code
1: 1:
@ -166,7 +173,7 @@ error_code:
.word BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW .word BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW
error_message: error_message:
.word error_string-(1b+4) .word error_string-(1b+4)
END(__strcpy_chk) END(__strcpy_chk_failed)
.data .data
error_string: error_string:

View File

@ -45,7 +45,7 @@
ENTRY(__memcpy_chk) ENTRY(__memcpy_chk)
cmp r2, r3 cmp r2, r3
bhi .L_memcpy_chk_fail bhi __memcpy_chk_fail
// Fall through to memcpy... // Fall through to memcpy...
END(__memcpy_chk) END(__memcpy_chk)
@ -53,20 +53,19 @@ END(__memcpy_chk)
ENTRY(memcpy) ENTRY(memcpy)
pld [r1, #64] pld [r1, #64]
stmfd sp!, {r0, lr} stmfd sp!, {r0, lr}
.cfi_adjust_cfa_offset 8 .cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0 .cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4 .cfi_rel_offset lr, 4
END(memcpy)
#define MEMCPY_BASE __memcpy_base
#define MEMCPY_BASE_ALIGNED __memcpy_base_aligned
#include "memcpy_base.S" #include "memcpy_base.S"
// Undo the cfi directives from above. ENTRY_PRIVATE(__memcpy_chk_fail)
.cfi_adjust_cfa_offset -8
.cfi_restore r0
.cfi_restore lr
.L_memcpy_chk_fail:
// Preserve lr for backtrace. // Preserve lr for backtrace.
push {lr} push {lr}
.cfi_adjust_cfa_offset 4 .cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0 .cfi_rel_offset lr, 0
ldr r0, error_message ldr r0, error_message
@ -78,7 +77,7 @@ error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW .word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
error_message: error_message:
.word error_string-(1b+4) .word error_string-(1b+4)
END(memcpy) END(__memcpy_chk_fail)
.data .data
error_string: error_string:

View File

@ -1,191 +1,123 @@
/*************************************************************************** /*
Copyright (c) 2009-2013 The Linux Foundation. All rights reserved. * Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of The Linux Foundation nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /*
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * This code assumes it is running on a processor that supports all arm v7
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * instructions, that supports neon instructions, and that has a 32 byte
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * cache line.
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR */
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
/* Assumes neon instructions and a cache line size of 64 bytes. */ // Assumes neon instructions and a cache line size of 32 bytes.
#include <machine/cpu-features.h> ENTRY_PRIVATE(MEMCPY_BASE)
#include <machine/asm.h> .cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
#define PLDOFFS (10) /* do we have at least 16-bytes to copy (needed for alignment below) */
#define PLDTHRESH (PLDOFFS)
#define BBTHRESH (4096/64)
#define PLDSIZE (64)
#if (PLDOFFS < 1)
#error Routine does not support offsets less than 1
#endif
#if (PLDTHRESH < PLDOFFS)
#error PLD threshold must be greater than or equal to the PLD offset
#endif
.text
.fpu neon
.L_memcpy_base:
cmp r2, #4
blt .L_neon_lt4
cmp r2, #16 cmp r2, #16
blt .L_neon_lt16 blo 5f
cmp r2, #32
blt .L_neon_16
cmp r2, #64
blt .L_neon_copy_32_a
mov r12, r2, lsr #6 /* align destination to cache-line for the write-buffer */
cmp r12, #PLDTHRESH rsb r3, r0, #0
ble .L_neon_copy_64_loop_nopld ands r3, r3, #0xF
beq 2f
push {r9, r10} /* copy up to 15-bytes (count in r3) */
.cfi_adjust_cfa_offset 8 sub r2, r2, r3
.cfi_rel_offset r9, 0 movs ip, r3, lsl #31
.cfi_rel_offset r10, 4 itt mi
ldrbmi lr, [r1], #1
strbmi lr, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1], #1
strbcs ip, [r0], #1
strbcs lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
cmp r12, #BBTHRESH 2: /* make sure we have at least 64 bytes to copy */
ble .L_neon_prime_pump subs r2, r2, #64
blo 2f
add lr, r0, #0x400 1: /* The main loop copies 64 bytes at a time */
add r9, r1, #(PLDOFFS*PLDSIZE) vld1.8 {d0 - d3}, [r1]!
sub lr, lr, r9 vld1.8 {d4 - d7}, [r1]!
lsl lr, lr, #21 pld [r1, #(32*8)]
lsr lr, lr, #21 subs r2, r2, #64
add lr, lr, #(PLDOFFS*PLDSIZE) vst1.8 {d0 - d3}, [r0, :128]!
cmp r12, lr, lsr #6 vst1.8 {d4 - d7}, [r0, :128]!
ble .L_neon_prime_pump bhs 1b
itt gt 2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
movgt r9, #(PLDOFFS) adds r2, r2, #32
rsbsgt r9, r9, lr, lsr #6 blo 4f
ble .L_neon_prime_pump
add r10, r1, lr /* Copy 32 bytes. These cache lines were already preloaded */
bic r10, #0x3F vld1.8 {d0 - d3}, [r1]!
sub r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
sub r12, r12, lr, lsr #6 4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
cmp r9, r12 5: /* copy up to 15-bytes (count in r2) */
itee le movs ip, r2, lsl #29
suble r12, r12, r9
movgt r9, r12
movgt r12, #0
pld [r1, #((PLDOFFS-1)*PLDSIZE)]
.L_neon_copy_64_loop_outer_doublepld:
pld [r1, #((PLDOFFS)*PLDSIZE)]
vld1.32 {q0, q1}, [r1]!
vld1.32 {q2, q3}, [r1]!
ldr r3, [r10]
subs r9, r9, #1
vst1.32 {q0, q1}, [r0]!
vst1.32 {q2, q3}, [r0]!
add r10, #64
bne .L_neon_copy_64_loop_outer_doublepld
cmp r12, #0
beq .L_neon_pop_before_nopld
cmp r12, #(512*1024/64)
blt .L_neon_copy_64_loop_outer
.L_neon_copy_64_loop_ddr:
vld1.32 {q0, q1}, [r1]!
vld1.32 {q2, q3}, [r1]!
pld [r10]
subs r12, r12, #1
vst1.32 {q0, q1}, [r0]!
vst1.32 {q2, q3}, [r0]!
add r10, #64
bne .L_neon_copy_64_loop_ddr
b .L_neon_pop_before_nopld
.L_neon_prime_pump:
mov lr, #(PLDOFFS*PLDSIZE)
add r10, r1, #(PLDOFFS*PLDSIZE)
bic r10, #0x3F
sub r12, r12, #PLDOFFS
ldr r3, [r10, #(-1*PLDSIZE)]
.L_neon_copy_64_loop_outer:
vld1.32 {q0, q1}, [r1]!
vld1.32 {q2, q3}, [r1]!
ldr r3, [r10]
subs r12, r12, #1
vst1.32 {q0, q1}, [r0]!
vst1.32 {q2, q3}, [r0]!
add r10, #64
bne .L_neon_copy_64_loop_outer
.L_neon_pop_before_nopld:
mov r12, lr, lsr #6
pop {r9, r10}
.cfi_adjust_cfa_offset -8
.cfi_restore r9
.cfi_restore r10
.L_neon_copy_64_loop_nopld:
vld1.32 {q8, q9}, [r1]!
vld1.32 {q10, q11}, [r1]!
subs r12, r12, #1
vst1.32 {q8, q9}, [r0]!
vst1.32 {q10, q11}, [r0]!
bne .L_neon_copy_64_loop_nopld
ands r2, r2, #0x3f
beq .L_neon_exit
.L_neon_copy_32_a:
movs r3, r2, lsl #27
bcc .L_neon_16
vld1.32 {q0,q1}, [r1]!
vst1.32 {q0,q1}, [r0]!
.L_neon_16:
bpl .L_neon_lt16
vld1.32 {q8}, [r1]!
vst1.32 {q8}, [r0]!
ands r2, r2, #0x0f
beq .L_neon_exit
.L_neon_lt16:
movs r3, r2, lsl #29
bcc 1f bcc 1f
vld1.8 {d0}, [r1]! vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]! vst1.8 {d0}, [r0]!
1: 1: bge 2f
bge .L_neon_lt4
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]! vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]! vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
.L_neon_lt4:
movs r2, r2, lsl #31
itt cs
ldrhcs r3, [r1], #2
strhcs r3, [r0], #2
itt mi itt mi
ldrbmi r3, [r1] ldrbmi r3, [r1], #1
strbmi r3, [r0] strbmi r3, [r0], #1
itttt cs
ldrbcs ip, [r1], #1
ldrbcs lr, [r1], #1
strbcs ip, [r0], #1
strbcs lr, [r0], #1
.L_neon_exit: ldmfd sp!, {r0, lr}
pop {r0, pc} bx lr
END(MEMCPY_BASE)

View File

@ -37,7 +37,6 @@
*/ */
.fpu neon .fpu neon
.syntax unified
ENTRY(__memset_chk) ENTRY(__memset_chk)
cmp r2, r3 cmp r2, r3
@ -69,7 +68,10 @@ END(bzero)
/* memset() returns its first argument. */ /* memset() returns its first argument. */
ENTRY(memset) ENTRY(memset)
mov r3, r0 stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0
vdup.8 q0, r1 vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */ /* make sure we have at least 32 bytes to write */
@ -79,7 +81,7 @@ ENTRY(memset)
1: /* The main loop writes 32 bytes at a time */ 1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32 subs r2, r2, #32
vst1.8 {d0 - d3}, [r3]! vst1.8 {d0 - d3}, [r0]!
bhs 1b bhs 1b
2: /* less than 32 left */ 2: /* less than 32 left */
@ -88,17 +90,18 @@ ENTRY(memset)
beq 3f beq 3f
// writes 16 bytes, 128-bits aligned // writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r3]! vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */ 3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29 movs ip, r2, lsl #29
bcc 1f bcc 1f
vst1.8 {d0}, [r3]! vst1.8 {d0}, [r0]!
1: bge 2f 1: bge 2f
vst1.32 {d0[0]}, [r3]! vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31 2: movs ip, r2, lsl #31
strbmi r1, [r3], #1 strmib r1, [r0], #1
strbcs r1, [r3], #1 strcsb r1, [r0], #1
strbcs r1, [r3], #1 strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr bx lr
END(memset) END(memset)

View File

@ -1,18 +1,3 @@
libc_openbsd_src_files_exclude_arm += \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
libc_bionic_src_files_exclude_arm += \
arch-arm/generic/bionic/memcpy.S \
arch-arm/generic/bionic/memset.S \
arch-arm/generic/bionic/strcmp.S \
arch-arm/generic/bionic/strcpy.S \
arch-arm/generic/bionic/strlen.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/krait/bionic/memcpy.S \ arch-arm/krait/bionic/memcpy.S \
arch-arm/krait/bionic/memset.S \ arch-arm/krait/bionic/memset.S \
@ -27,5 +12,8 @@ libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/strcpy.S \ arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/strlen.S \ arch-arm/cortex-a15/bionic/strlen.S \
libc_bionic_src_files_arm += \
arch-arm/generic/bionic/memcmp.S \
libc_bionic_src_files_arm += \ libc_bionic_src_files_arm += \
arch-arm/denver/bionic/memmove.S \ arch-arm/denver/bionic/memmove.S \

View File

@ -1,23 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(___mremap)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_mremap
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(___mremap)
.hidden ___mremap

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___clock_nanosleep) ENTRY(__clock_nanosleep)
mov ip, r7 mov ip, r7
ldr r7, =__NR_clock_nanosleep ldr r7, =__NR_clock_nanosleep
swi #0 swi #0
@ -11,5 +11,4 @@ ENTRY(___clock_nanosleep)
bxls lr bxls lr
neg r0, r0 neg r0, r0
b __set_errno_internal b __set_errno_internal
END(___clock_nanosleep) END(__clock_nanosleep)
.hidden ___clock_nanosleep

View File

@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(__preadv64)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_preadv
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(__preadv64)

View File

@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(__pwritev64)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_pwritev
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(__pwritev64)

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo) ENTRY(__rt_sigqueueinfo)
mov ip, r7 mov ip, r7
ldr r7, =__NR_rt_sigqueueinfo ldr r7, =__NR_rt_sigqueueinfo
swi #0 swi #0
@ -11,5 +11,4 @@ ENTRY(___rt_sigqueueinfo)
bxls lr bxls lr
neg r0, r0 neg r0, r0
b __set_errno_internal b __set_errno_internal
END(___rt_sigqueueinfo) END(__rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___fgetxattr) ENTRY(fgetxattr)
mov ip, r7 mov ip, r7
ldr r7, =__NR_fgetxattr ldr r7, =__NR_fgetxattr
swi #0 swi #0
@ -11,5 +11,4 @@ ENTRY(___fgetxattr)
bxls lr bxls lr
neg r0, r0 neg r0, r0
b __set_errno_internal b __set_errno_internal
END(___fgetxattr) END(fgetxattr)
.hidden ___fgetxattr

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___flistxattr) ENTRY(flistxattr)
mov ip, r7 mov ip, r7
ldr r7, =__NR_flistxattr ldr r7, =__NR_flistxattr
swi #0 swi #0
@ -11,5 +11,4 @@ ENTRY(___flistxattr)
bxls lr bxls lr
neg r0, r0 neg r0, r0
b __set_errno_internal b __set_errno_internal
END(___flistxattr) END(flistxattr)
.hidden ___flistxattr

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___fsetxattr) ENTRY(fsetxattr)
mov ip, sp mov ip, sp
stmfd sp!, {r4, r5, r6, r7} stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16 .cfi_def_cfa_offset 16
@ -19,5 +19,4 @@ ENTRY(___fsetxattr)
bxls lr bxls lr
neg r0, r0 neg r0, r0
b __set_errno_internal b __set_errno_internal
END(___fsetxattr) END(fsetxattr)
.hidden ___fsetxattr

View File

@ -0,0 +1,14 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(ftruncate)
mov ip, r7
ldr r7, =__NR_ftruncate
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(ftruncate)

View File

@ -0,0 +1,14 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(mremap)
mov ip, r7
ldr r7, =__NR_mremap
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(mremap)

View File

@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_process_vm_readv
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(process_vm_readv)

View File

@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_process_vm_writev
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(process_vm_writev)

View File

@ -0,0 +1,14 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(vfork)
mov ip, r7
ldr r7, =__NR_vfork
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(vfork)

View File

@ -1,38 +1,32 @@
# 64-bit arm. # 64-bit arm.
# #
# Generic arm64 optimizations, may be overriden by CPU variants. # Default implementations of functions that are commonly optimized.
# #
libc_bionic_src_files_arm64 += \ libc_bionic_src_files_arm64 += \
arch-arm64/generic/bionic/memchr.S \ bionic/__memset_chk.cpp \
arch-arm64/generic/bionic/memcmp.S \ bionic/__strcpy_chk.cpp \
arch-arm64/generic/bionic/memcpy.S \ bionic/__strcat_chk.cpp \
arch-arm64/generic/bionic/memmove.S \ bionic/strrchr.cpp \
arch-arm64/generic/bionic/memset.S \
arch-arm64/generic/bionic/stpcpy.S \
arch-arm64/generic/bionic/strchr.S \
arch-arm64/generic/bionic/strcmp.S \
arch-arm64/generic/bionic/strcpy.S \
arch-arm64/generic/bionic/strlen.S \
arch-arm64/generic/bionic/strncmp.S \
arch-arm64/generic/bionic/strnlen.S \
arch-arm64/generic/bionic/wmemmove.S \
libc_bionic_src_files_exclude_arm64 += \ libc_freebsd_src_files_arm64 += \
bionic/__memcpy_chk.cpp \ upstream-freebsd/lib/libc/string/wcscat.c \
bionic/strchr.cpp \ upstream-freebsd/lib/libc/string/wcschr.c \
bionic/strnlen.c \ upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
libc_freebsd_src_files_exclude_arm64 += \ libc_openbsd_src_files_arm64 += \
upstream-freebsd/lib/libc/string/wmemmove.c \ upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
libc_openbsd_src_files_exclude_arm64 += \ upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/memchr.c \ upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/memmove.c \ upstream-openbsd/lib/libc/string/strlcpy.c \
upstream-openbsd/lib/libc/string/stpcpy.c \ upstream-openbsd/lib/libc/string/strncat.c \
upstream-openbsd/lib/libc/string/strcpy.c \ upstream-openbsd/lib/libc/string/strncpy.c \
upstream-openbsd/lib/libc/string/strncmp.c \
# #
# Inherently architecture-specific code. # Inherently architecture-specific code.
@ -42,9 +36,12 @@ libc_bionic_src_files_arm64 += \
arch-arm64/bionic/__bionic_clone.S \ arch-arm64/bionic/__bionic_clone.S \
arch-arm64/bionic/_exit_with_stack_teardown.S \ arch-arm64/bionic/_exit_with_stack_teardown.S \
arch-arm64/bionic/setjmp.S \ arch-arm64/bionic/setjmp.S \
arch-arm64/bionic/__set_tls.c \
arch-arm64/bionic/syscall.S \ arch-arm64/bionic/syscall.S \
arch-arm64/bionic/vfork.S \ arch-arm64/bionic/vfork.S \
# Work around for http://b/20065774.
libc_bionic_src_files_arm64 += arch-arm64/bionic/libgcc_compat.c
libc_crt_target_cflags_arm64 := \ libc_crt_target_cflags_arm64 := \
-I$(LOCAL_PATH)/arch-arm64/include -I$(LOCAL_PATH)/arch-arm64/include
@ -59,7 +56,6 @@ libc_crt_target_crtbegin_so_file_arm64 := \
ifeq ($(strip $(TARGET_CPU_VARIANT)),) ifeq ($(strip $(TARGET_CPU_VARIANT)),)
$(warning TARGET_ARCH is arm64, but TARGET_CPU_VARIANT is not defined) $(warning TARGET_ARCH is arm64, but TARGET_CPU_VARIANT is not defined)
endif endif
ifneq ($(TARGET_CPU_VARIANT),generic)
cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
ifeq ($(wildcard $(cpu_variant_mk)),) ifeq ($(wildcard $(cpu_variant_mk)),)
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.") $(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
@ -68,4 +64,3 @@ include $(cpu_variant_mk)
libc_common_additional_dependencies += $(cpu_variant_mk) libc_common_additional_dependencies += $(cpu_variant_mk)
cpu_variant_mk := cpu_variant_mk :=
endif

View File

@ -0,0 +1,15 @@
/* STOPSHIP: remove this once the flounder blobs have been rebuilt (http://b/20065774). */
#if !defined(__clang__)
extern void __clear_cache(char*, char*);
extern char _Unwind_Backtrace;
extern char _Unwind_GetIP;
void* __bionic_libgcc_compat_symbols[] = {
&__clear_cache,
&_Unwind_Backtrace,
&_Unwind_GetIP,
};
#endif

View File

@ -52,29 +52,6 @@
#define _JB_D10_D11 (_JB_D12_D13 + 2) #define _JB_D10_D11 (_JB_D12_D13 + 2)
#define _JB_D8_D9 (_JB_D10_D11 + 2) #define _JB_D8_D9 (_JB_D10_D11 + 2)
#define MANGLE_REGISTERS 1
.macro m_mangle_registers reg, sp_reg
#if MANGLE_REGISTERS
eor x19, x19, \reg
eor x20, x20, \reg
eor x21, x21, \reg
eor x22, x22, \reg
eor x23, x23, \reg
eor x24, x24, \reg
eor x25, x25, \reg
eor x26, x26, \reg
eor x27, x27, \reg
eor x28, x28, \reg
eor x29, x29, \reg
eor x30, x30, \reg
eor \sp_reg, \sp_reg, \reg
#endif
.endm
.macro m_unmangle_registers reg, sp_reg
m_mangle_registers \reg, sp_reg=\sp_reg
.endm
ENTRY(setjmp) ENTRY(setjmp)
mov w1, #1 mov w1, #1
b sigsetjmp b sigsetjmp
@ -87,47 +64,23 @@ END(_setjmp)
// int sigsetjmp(sigjmp_buf env, int save_signal_mask); // int sigsetjmp(sigjmp_buf env, int save_signal_mask);
ENTRY(sigsetjmp) ENTRY(sigsetjmp)
stp x0, x30, [sp, #-16]! // Record whether or not we're saving the signal mask.
.cfi_def_cfa_offset 16 str w1, [x0, #(_JB_SIGFLAG * 8)]
.cfi_rel_offset x0, 0
.cfi_rel_offset x30, 8
// Get the cookie and store it along with the signal flag.
mov x0, x1
bl __bionic_setjmp_cookie_get
mov x1, x0
ldr x0, [sp, #0]
str x1, [x0, #(_JB_SIGFLAG * 8)]
// Do we need to save the signal mask? // Do we need to save the signal mask?
tbz w1, #0, 1f cbz w1, 1f
// Save the cookie for later.
stp x1, xzr, [sp, #-16]!
.cfi_adjust_cfa_offset 16
// Save current signal mask. // Save current signal mask.
stp x0, x30, [sp, #-16]!
// The 'how' argument is ignored if new_mask is NULL. // The 'how' argument is ignored if new_mask is NULL.
mov x1, #0 // NULL. mov x1, #0 // NULL.
add x2, x0, #(_JB_SIGMASK * 8) // old_mask. add x2, x0, #(_JB_SIGMASK * 8) // old_mask.
bl sigprocmask bl sigprocmask
ldp x0, x30, [sp], #16
ldp x1, xzr, [sp], #16
.cfi_adjust_cfa_offset -16
1: 1:
// Restore original x0 and lr.
ldp x0, x30, [sp], #16
.cfi_adjust_cfa_offset -16
.cfi_restore x0
.cfi_restore x30
// Mask off the signal flag bit.
bic x1, x1, #1
// Save core registers. // Save core registers.
mov x10, sp mov x10, sp
m_mangle_registers x1, sp_reg=x10
stp x30, x10, [x0, #(_JB_X30_SP * 8)] stp x30, x10, [x0, #(_JB_X30_SP * 8)]
stp x28, x29, [x0, #(_JB_X28_X29 * 8)] stp x28, x29, [x0, #(_JB_X28_X29 * 8)]
stp x26, x27, [x0, #(_JB_X26_X27 * 8)] stp x26, x27, [x0, #(_JB_X26_X27 * 8)]
@ -135,7 +88,6 @@ ENTRY(sigsetjmp)
stp x22, x23, [x0, #(_JB_X22_X23 * 8)] stp x22, x23, [x0, #(_JB_X22_X23 * 8)]
stp x20, x21, [x0, #(_JB_X20_X21 * 8)] stp x20, x21, [x0, #(_JB_X20_X21 * 8)]
str x19, [x0, #(_JB_X19 * 8)] str x19, [x0, #(_JB_X19 * 8)]
m_unmangle_registers x1, sp_reg=x10
// Save floating point registers. // Save floating point registers.
stp d14, d15, [x0, #(_JB_D14_D15 * 8)] stp d14, d15, [x0, #(_JB_D14_D15 * 8)]
@ -150,60 +102,30 @@ END(sigsetjmp)
// void siglongjmp(sigjmp_buf env, int value); // void siglongjmp(sigjmp_buf env, int value);
ENTRY(siglongjmp) ENTRY(siglongjmp)
// Do we need to restore the signal mask? // Do we need to restore the signal mask?
ldr x2, [x0, #(_JB_SIGFLAG * 8)] ldr w9, [x0, #(_JB_SIGFLAG * 8)]
tbz w2, #0, 1f cbz w9, 1f
stp x0, x30, [sp, #-16]!
.cfi_adjust_cfa_offset 16
.cfi_rel_offset x0, 0
.cfi_rel_offset x30, 8
// Restore signal mask. // Restore signal mask.
stp x0, x30, [sp, #-16]!
mov x19, x1 // Save 'value'. mov x19, x1 // Save 'value'.
mov x2, x0 mov x2, x0
mov x0, #2 // SIG_SETMASK mov x0, #2 // SIG_SETMASK
add x1, x2, #(_JB_SIGMASK * 8) // new_mask. add x1, x2, #(_JB_SIGMASK * 8) // new_mask.
mov x2, #0 // NULL. mov x2, #0 // NULL.
bl sigprocmask bl sigprocmask
mov x1, x19 // Restore 'value'. mov x1, x19 // Restore 'value'.
// Restore original x0 and lr.
ldp x0, x30, [sp], #16 ldp x0, x30, [sp], #16
.cfi_adjust_cfa_offset -16
.cfi_restore x0
.cfi_restore x30
ldr x2, [x0, #(_JB_SIGFLAG * 8)]
1: 1:
// Restore core registers. // Restore core registers.
bic x2, x2, #1
ldp x30, x10, [x0, #(_JB_X30_SP * 8)] ldp x30, x10, [x0, #(_JB_X30_SP * 8)]
mov sp, x10
ldp x28, x29, [x0, #(_JB_X28_X29 * 8)] ldp x28, x29, [x0, #(_JB_X28_X29 * 8)]
ldp x26, x27, [x0, #(_JB_X26_X27 * 8)] ldp x26, x27, [x0, #(_JB_X26_X27 * 8)]
ldp x24, x25, [x0, #(_JB_X24_X25 * 8)] ldp x24, x25, [x0, #(_JB_X24_X25 * 8)]
ldp x22, x23, [x0, #(_JB_X22_X23 * 8)] ldp x22, x23, [x0, #(_JB_X22_X23 * 8)]
ldp x20, x21, [x0, #(_JB_X20_X21 * 8)] ldp x20, x21, [x0, #(_JB_X20_X21 * 8)]
ldr x19, [x0, #(_JB_X19 * 8)] ldr x19, [x0, #(_JB_X19 * 8)]
m_unmangle_registers x2, sp_reg=x10
mov sp, x10
stp x0, x1, [sp, #-16]!
.cfi_adjust_cfa_offset 16
.cfi_rel_offset x0, 0
.cfi_rel_offset x1, 8
stp x30, xzr, [sp, #-16]!
.cfi_adjust_cfa_offset 16
.cfi_rel_offset x30, 0
ldr x0, [x0, #(_JB_SIGFLAG * 8)]
bl __bionic_setjmp_cookie_check
ldp x30, xzr, [sp], #16
.cfi_adjust_cfa_offset -16
.cfi_restore x30
ldp x0, x1, [sp], #16
.cfi_adjust_cfa_offset -16
.cfi_restore x0
.cfi_restore x1
// Restore floating point registers. // Restore floating point registers.
ldp d14, d15, [x0, #(_JB_D14_D15 * 8)] ldp d14, d15, [x0, #(_JB_D14_D15 * 8)]
@ -211,6 +133,13 @@ ENTRY(siglongjmp)
ldp d10, d11, [x0, #(_JB_D10_D11 * 8)] ldp d10, d11, [x0, #(_JB_D10_D11 * 8)]
ldp d8, d9, [x0, #(_JB_D8_D9 * 8)] ldp d8, d9, [x0, #(_JB_D8_D9 * 8)]
// Validate sp (sp mod 16 = 0) and lr (lr mod 4 = 0).
tst x30, #3
b.ne longjmperror
mov x10, sp
tst x10, #15
b.ne longjmperror
// Set return value. // Set return value.
cmp w1, wzr cmp w1, wzr
csinc w0, w1, wzr, ne csinc w0, w1, wzr, ne

View File

@ -31,11 +31,6 @@
#include <linux/sched.h> #include <linux/sched.h>
ENTRY(vfork) ENTRY(vfork)
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
mrs x0, tpidr_el0
ldr x0, [x0, #8]
str wzr, [x0, #20]
mov x0, #(CLONE_VM | CLONE_VFORK | SIGCHLD) mov x0, #(CLONE_VM | CLONE_VFORK | SIGCHLD)
mov x1, xzr mov x1, xzr
mov x2, xzr mov x2, xzr

View File

@ -1,7 +1,14 @@
libc_bionic_src_files_arm64 += \ libc_bionic_src_files_arm64 += \
arch-arm64/generic/bionic/memchr.S \
arch-arm64/generic/bionic/memcmp.S \
arch-arm64/denver64/bionic/memcpy.S \ arch-arm64/denver64/bionic/memcpy.S \
arch-arm64/generic/bionic/memmove.S \
arch-arm64/denver64/bionic/memset.S \ arch-arm64/denver64/bionic/memset.S \
arch-arm64/generic/bionic/stpcpy.S \
libc_bionic_src_files_exclude_arm64 += \ arch-arm64/generic/bionic/strchr.S \
arch-arm64/generic/bionic/memcpy.S \ arch-arm64/generic/bionic/strcmp.S \
arch-arm64/generic/bionic/memset.S \ arch-arm64/generic/bionic/strcpy.S \
arch-arm64/generic/bionic/strlen.S \
arch-arm64/generic/bionic/strncmp.S \
arch-arm64/generic/bionic/strnlen.S \
arch-arm64/generic/bionic/wmemmove.S

View File

@ -101,7 +101,7 @@ ENTRY(memchr)
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */ addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */ addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.d[0] mov synd, vend.2d[0]
/* Clear the soff*2 lower bits */ /* Clear the soff*2 lower bits */
lsl tmp, soff, #1 lsl tmp, soff, #1
lsr synd, synd, tmp lsr synd, synd, tmp
@ -121,7 +121,7 @@ ENTRY(memchr)
/* Use a fast check for the termination condition */ /* Use a fast check for the termination condition */
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d addp vend.2d, vend.2d, vend.2d
mov synd, vend.d[0] mov synd, vend.2d[0]
/* We're not out of data, loop if we haven't found the character */ /* We're not out of data, loop if we haven't found the character */
cbz synd, .Lloop cbz synd, .Lloop
@ -131,7 +131,7 @@ ENTRY(memchr)
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */ addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */ addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.d[0] mov synd, vend.2d[0]
/* Only do the clear for the last possible block */ /* Only do the clear for the last possible block */
b.hi .Ltail b.hi .Ltail

View File

@ -35,6 +35,10 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
/* Parameters and result. */ /* Parameters and result. */
#ifdef BCOPY
#define origdstin x1
#define origsrc x0
#endif
#define dstin x0 #define dstin x0
#define src x1 #define src x1
#define count x2 #define count x2
@ -55,7 +59,13 @@
#define D_l x13 #define D_l x13
#define D_h x14 #define D_h x14
#if defined(WMEMMOVE) #ifdef BCOPY
ENTRY(bcopy)
/* Swap src and dst so that a branch to memcpy doesn't cause issues. */
mov tmp1, origsrc
mov origsrc, origdstin
mov origdstin, tmp1
#elif defined(WMEMMOVE)
ENTRY(wmemmove) ENTRY(wmemmove)
lsl count, count, #2 lsl count, count, #2
#else #else
@ -322,7 +332,9 @@ ENTRY(memmove)
tst count, #0x3f tst count, #0x3f
b.ne .Ltail63down b.ne .Ltail63down
ret ret
#if defined(WMEMMOVE) #ifdef BCOPY
END(bcopy)
#elif defined(WMEMMOVE)
END(wmemmove) END(wmemmove)
#else #else
END(memmove) END(memmove)

View File

@ -109,7 +109,7 @@ ENTRY(strchr)
addp vend1.16b, vend1.16b, vend2.16b // 128->64 addp vend1.16b, vend1.16b, vend2.16b // 128->64
lsr tmp1, tmp3, tmp1 lsr tmp1, tmp3, tmp1
mov tmp3, vend1.d[0] mov tmp3, vend1.2d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits. bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail cbnz tmp1, .Ltail
@ -124,7 +124,7 @@ ENTRY(strchr)
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vend1.16b, vend2.16b orr vend1.16b, vend1.16b, vend2.16b
addp vend1.2d, vend1.2d, vend1.2d addp vend1.2d, vend1.2d, vend1.2d
mov tmp1, vend1.d[0] mov tmp1, vend1.2d[0]
cbz tmp1, .Lloop cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why /* Termination condition found. Now need to establish exactly why
@ -138,7 +138,7 @@ ENTRY(strchr)
addp vend1.16b, vend1.16b, vend2.16b // 256->128 addp vend1.16b, vend1.16b, vend2.16b // 256->128
addp vend1.16b, vend1.16b, vend2.16b // 128->64 addp vend1.16b, vend1.16b, vend2.16b // 128->64
mov tmp1, vend1.d[0] mov tmp1, vend1.2d[0]
.Ltail: .Ltail:
/* Count the trailing zeros, by bit reversing... */ /* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1 rbit tmp1, tmp1

View File

@ -0,0 +1,14 @@
libc_bionic_src_files_arm64 += \
arch-arm64/generic/bionic/memchr.S \
arch-arm64/generic/bionic/memcmp.S \
arch-arm64/generic/bionic/memcpy.S \
arch-arm64/generic/bionic/memmove.S \
arch-arm64/generic/bionic/memset.S \
arch-arm64/generic/bionic/stpcpy.S \
arch-arm64/generic/bionic/strchr.S \
arch-arm64/generic/bionic/strcmp.S \
arch-arm64/generic/bionic/strcpy.S \
arch-arm64/generic/bionic/strlen.S \
arch-arm64/generic/bionic/strncmp.S \
arch-arm64/generic/bionic/strnlen.S \
arch-arm64/generic/bionic/wmemmove.S

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___clock_nanosleep) ENTRY(__clock_nanosleep)
mov x8, __NR_clock_nanosleep mov x8, __NR_clock_nanosleep
svc #0 svc #0
@ -11,5 +11,5 @@ ENTRY(___clock_nanosleep)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___clock_nanosleep) END(__clock_nanosleep)
.hidden ___clock_nanosleep .hidden __clock_nanosleep

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo) ENTRY(__rt_sigqueueinfo)
mov x8, __NR_rt_sigqueueinfo mov x8, __NR_rt_sigqueueinfo
svc #0 svc #0
@ -11,5 +11,5 @@ ENTRY(___rt_sigqueueinfo)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___rt_sigqueueinfo) END(__rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo .hidden __rt_sigqueueinfo

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___fgetxattr) ENTRY(fgetxattr)
mov x8, __NR_fgetxattr mov x8, __NR_fgetxattr
svc #0 svc #0
@ -11,5 +11,4 @@ ENTRY(___fgetxattr)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___fgetxattr) END(fgetxattr)
.hidden ___fgetxattr

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___flistxattr) ENTRY(flistxattr)
mov x8, __NR_flistxattr mov x8, __NR_flistxattr
svc #0 svc #0
@ -11,5 +11,4 @@ ENTRY(___flistxattr)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___flistxattr) END(flistxattr)
.hidden ___flistxattr

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___fsetxattr) ENTRY(fsetxattr)
mov x8, __NR_fsetxattr mov x8, __NR_fsetxattr
svc #0 svc #0
@ -11,5 +11,4 @@ ENTRY(___fsetxattr)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___fsetxattr) END(fsetxattr)
.hidden ___fsetxattr

View File

@ -2,7 +2,7 @@
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
ENTRY(___mremap) ENTRY(mremap)
mov x8, __NR_mremap mov x8, __NR_mremap
svc #0 svc #0
@ -11,5 +11,4 @@ ENTRY(___mremap)
b.hi __set_errno_internal b.hi __set_errno_internal
ret ret
END(___mremap) END(mremap)
.hidden ___mremap

View File

@ -1,16 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(preadv)
mov x8, __NR_preadv
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(preadv)
ALIAS_SYMBOL(preadv64, preadv)

View File

@ -1,14 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
mov x8, __NR_process_vm_readv
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(process_vm_readv)

View File

@ -1,14 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
mov x8, __NR_process_vm_writev
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(process_vm_writev)

View File

@ -1,16 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(pwritev)
mov x8, __NR_pwritev
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(pwritev)
ALIAS_SYMBOL(pwritev64, pwritev)

View File

@ -28,9 +28,9 @@
#ifdef __LP64__ #ifdef __LP64__
# define ASM_PTR_SIZE(x) .quad x # define ASM_PTR_SIZE(x) .quad x
# define ASM_ALIGN_TO_PTR_SIZE .balign 8 # define ASM_ALIGN(x)
#else #else
# define ASM_PTR_SIZE(x) .long x # define ASM_PTR_SIZE(x) .long x
# define ASM_ALIGN_TO_PTR_SIZE .balign 4 # define ASM_ALIGN(x) .align x
#endif #endif

View File

@ -29,15 +29,12 @@
#include "asm_multiarch.h" #include "asm_multiarch.h"
.section .preinit_array, "aw" .section .preinit_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0) ASM_PTR_SIZE(0)
.section .init_array, "aw" .section .init_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0) ASM_PTR_SIZE(0)
.section .fini_array, "aw" .section .fini_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0) ASM_PTR_SIZE(0)
#if defined(__linux__) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__)
@ -45,9 +42,7 @@
#endif #endif
#if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
.section .eh_frame,"a",@progbits .section .eh_frame,"a",@progbits
#if defined(__i386__) ASM_ALIGN(4)
.balign 4
#endif
.type __FRAME_END__, @object .type __FRAME_END__, @object
.size __FRAME_END__, 4 .size __FRAME_END__, 4
__FRAME_END__: __FRAME_END__:

View File

@ -26,14 +26,22 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#include "asm_multiarch.h"
#ifndef __arm__
.section .init_array, "aw"
ASM_PTR_SIZE(0)
.section .fini_array, "aw"
ASM_PTR_SIZE(0)
#endif
#if defined(__linux__) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits .section .note.GNU-stack,"",%progbits
#endif #endif
#if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
.section .eh_frame,"a",@progbits .section .eh_frame,"a",@progbits
#if defined(__i386__) ASM_ALIGN(4)
.balign 4
#endif
.type __FRAME_END__, @object .type __FRAME_END__, @object
.size __FRAME_END__, 4 .size __FRAME_END__, 4
__FRAME_END__: __FRAME_END__:

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2014 The Android Open Source Project * Copyright (C) 2014-2015 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -132,9 +132,10 @@
/* field: byte offset: size: */ /* field: byte offset: size: */
/* dynam filler (0*4) 0-4 bytes of rounddown filler, DON'T TOUCH!! /* dynam filler (0*4) 0-4 bytes of rounddown filler, DON'T TOUCH!!
often overlays user storage!! */ often overlays user storage!! */
#define SC_FPSR_OFFSET (1*4) /* 4 bytes, floating point control/status reg */ #define SC_MAGIC_OFFSET (1*4) /* 4 bytes, identify jmpbuf, first actual field */
#define SC_FLAG_OFFSET (2*4) /* 4 bytes, savesigs flag */
#define SC_FPSR_OFFSET (3*4) /* 4 bytes, floating point control/status reg */
/* following fields are 8-byte aligned */ /* following fields are 8-byte aligned */
#define SC_FLAG_OFFSET (2*4) /* 8 bytes, cookie and savesigs flag, first actual field */
#define SC_MASK_OFFSET (4*4) /* 16 bytes, mips32/mips64 version of sigset_t */ #define SC_MASK_OFFSET (4*4) /* 16 bytes, mips32/mips64 version of sigset_t */
#define SC_SPARE_OFFSET (8*4) /* 8 bytes, reserved for future uses */ #define SC_SPARE_OFFSET (8*4) /* 8 bytes, reserved for future uses */
@ -165,16 +166,6 @@
#error _JBLEN is too small #error _JBLEN is too small
#endif #endif
.macro m_mangle_reg_and_store reg, cookie, temp, offset
xor \temp, \reg, \cookie
REG_S \temp, \offset
.endm
.macro m_unmangle_reg_and_load reg, cookie, temp, offset
REG_L \temp, \offset
xor \reg, \temp, \cookie
.endm
/* /*
* *
* GPOFF and FRAMESIZE must be the same for all setjmp/longjmp routines * GPOFF and FRAMESIZE must be the same for all setjmp/longjmp routines
@ -199,46 +190,36 @@ setjmp_common:
li t0, ~7 li t0, ~7
and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary
#endif #endif
sw a1, SC_FLAG_OFFSET(a0) # save savesigs flag
beqz a1, 1f # do saving of signal mask?
REG_S ra, RAOFF(sp) # spill state REG_S ra, RAOFF(sp) # spill state
REG_S a0, A0OFF(sp) REG_S a0, A0OFF(sp)
# get the cookie and store it along with the signal flag.
move a0, a1
jal __bionic_setjmp_cookie_get
REG_L a0, A0OFF(sp)
REG_S v0, SC_FLAG_OFFSET(a0) # save cookie and savesigs flag
andi t0, v0, 1 # extract savesigs flag
beqz t0, 1f # do saving of signal mask?
# call sigprocmask(int how ignored, sigset_t* null, sigset_t* SC_MASK(a0)): # call sigprocmask(int how ignored, sigset_t* null, sigset_t* SC_MASK(a0)):
LA a2, SC_MASK_OFFSET(a0) # gets current signal mask LA a2, SC_MASK_OFFSET(a0) # gets current signal mask
li a0, 0 # how; ignored when new mask is null li a0, 0 # how; ignored when new mask is null
li a1, 0 # null new mask li a1, 0 # null new mask
jal sigprocmask # get current signal mask jal sigprocmask # get current signal mask
REG_L a0, A0OFF(sp) REG_L a0, A0OFF(sp)
1:
REG_L gp, GPOFF(sp) # restore spills
REG_L ra, RAOFF(sp) REG_L ra, RAOFF(sp)
REG_L t0, SC_FLAG_OFFSET(a0) # move cookie to temp reg 1:
li v0, 0xACEDBADE # sigcontext magic number
sw v0, SC_MAGIC_OFFSET(a0)
# callee-saved long-sized regs: # callee-saved long-sized regs:
PTR_ADDU v1, sp, FRAMESZ # save orig sp REG_S ra, SC_REGS+0*REGSZ(a0)
REG_S s0, SC_REGS+1*REGSZ(a0)
# m_mangle_reg_and_store reg, cookie, temp, offset REG_S s1, SC_REGS+2*REGSZ(a0)
m_mangle_reg_and_store ra, t0, t1, SC_REGS+0*REGSZ(a0) REG_S s2, SC_REGS+3*REGSZ(a0)
m_mangle_reg_and_store s0, t0, t2, SC_REGS+1*REGSZ(a0) REG_S s3, SC_REGS+4*REGSZ(a0)
m_mangle_reg_and_store s1, t0, t3, SC_REGS+2*REGSZ(a0) REG_S s4, SC_REGS+5*REGSZ(a0)
m_mangle_reg_and_store s2, t0, t1, SC_REGS+3*REGSZ(a0) REG_S s5, SC_REGS+6*REGSZ(a0)
m_mangle_reg_and_store s3, t0, t2, SC_REGS+4*REGSZ(a0) REG_S s6, SC_REGS+7*REGSZ(a0)
m_mangle_reg_and_store s4, t0, t3, SC_REGS+5*REGSZ(a0) REG_S s7, SC_REGS+8*REGSZ(a0)
m_mangle_reg_and_store s5, t0, t1, SC_REGS+6*REGSZ(a0) REG_S s8, SC_REGS+9*REGSZ(a0)
m_mangle_reg_and_store s6, t0, t2, SC_REGS+7*REGSZ(a0) REG_L v0, GPOFF(sp)
m_mangle_reg_and_store s7, t0, t3, SC_REGS+8*REGSZ(a0) REG_S v0, SC_REGS+10*REGSZ(a0) # save gp
m_mangle_reg_and_store s8, t0, t1, SC_REGS+9*REGSZ(a0) PTR_ADDU v0, sp, FRAMESZ
m_mangle_reg_and_store gp, t0, t2, SC_REGS+10*REGSZ(a0) REG_S v0, SC_REGS+11*REGSZ(a0) # save orig sp
m_mangle_reg_and_store v1, t0, t3, SC_REGS+11*REGSZ(a0)
cfc1 v0, $31 cfc1 v0, $31
@ -307,41 +288,36 @@ NON_LEAF(siglongjmp, FRAMESZ, ra)
li t0, ~7 li t0, ~7
and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary
#endif #endif
lw v0, SC_MAGIC_OFFSET(a0)
li t0, 0xACEDBADE
bne v0, t0, longjmp_botch # jump if error
move s1, a1 # temp spill lw t0, SC_FLAG_OFFSET(a0) # get savesigs flag
move s0, a0
# extract savesigs flag
REG_L s2, SC_FLAG_OFFSET(s0)
andi t0, s2, 1
beqz t0, 1f # restore signal mask? beqz t0, 1f # restore signal mask?
REG_S a1, A1OFF(sp) # temp spill
REG_S a0, A0OFF(sp)
# call sigprocmask(int how SIG_SETMASK, sigset_t* SC_MASK(a0), sigset_t* null): # call sigprocmask(int how SIG_SETMASK, sigset_t* SC_MASK(a0), sigset_t* null):
LA a1, SC_MASK_OFFSET(s0) # signals being restored LA a1, SC_MASK_OFFSET(a0) # signals being restored
li a0, 3 # mips SIG_SETMASK li a0, 3 # mips SIG_SETMASK
li a2, 0 # null li a2, 0 # null
jal sigprocmask # restore signal mask jal sigprocmask # restore signal mask
REG_L a0, A0OFF(sp)
REG_L a1, A1OFF(sp)
1: 1:
move t0, s2 # get cookie to temp reg
move a1, s1
move a0, s0
# callee-saved long-sized regs: # callee-saved long-sized regs:
REG_L ra, SC_REGS+0*REGSZ(a0)
# m_unmangle_reg_and_load reg, cookie, temp, offset REG_L s0, SC_REGS+1*REGSZ(a0)
# don't restore gp yet, old value is needed for cookie_check call REG_L s1, SC_REGS+2*REGSZ(a0)
m_unmangle_reg_and_load ra, t0, t1, SC_REGS+0*REGSZ(a0) REG_L s2, SC_REGS+3*REGSZ(a0)
m_unmangle_reg_and_load s0, t0, t2, SC_REGS+1*REGSZ(a0) REG_L s3, SC_REGS+4*REGSZ(a0)
m_unmangle_reg_and_load s1, t0, t3, SC_REGS+2*REGSZ(a0) REG_L s4, SC_REGS+5*REGSZ(a0)
m_unmangle_reg_and_load s2, t0, t1, SC_REGS+3*REGSZ(a0) REG_L s5, SC_REGS+6*REGSZ(a0)
m_unmangle_reg_and_load s3, t0, t2, SC_REGS+4*REGSZ(a0) REG_L s6, SC_REGS+7*REGSZ(a0)
m_unmangle_reg_and_load s4, t0, t3, SC_REGS+5*REGSZ(a0) REG_L s7, SC_REGS+8*REGSZ(a0)
m_unmangle_reg_and_load s5, t0, t1, SC_REGS+6*REGSZ(a0) REG_L s8, SC_REGS+9*REGSZ(a0)
m_unmangle_reg_and_load s6, t0, t2, SC_REGS+7*REGSZ(a0) REG_L gp, SC_REGS+10*REGSZ(a0)
m_unmangle_reg_and_load s7, t0, t3, SC_REGS+8*REGSZ(a0) REG_L sp, SC_REGS+11*REGSZ(a0)
m_unmangle_reg_and_load s8, t0, t1, SC_REGS+9*REGSZ(a0)
m_unmangle_reg_and_load v1, t0, t2, SC_REGS+10*REGSZ(a0)
m_unmangle_reg_and_load sp, t0, t3, SC_REGS+11*REGSZ(a0)
lw v0, SC_FPSR_OFFSET(a0) lw v0, SC_FPSR_OFFSET(a0)
ctc1 v0, $31 # restore old fr mode before fp values ctc1 v0, $31 # restore old fr mode before fp values
@ -365,22 +341,15 @@ NON_LEAF(siglongjmp, FRAMESZ, ra)
l.d $f28, SC_FPREGS+4*REGSZ_FP(a0) l.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
l.d $f30, SC_FPREGS+5*REGSZ_FP(a0) l.d $f30, SC_FPREGS+5*REGSZ_FP(a0)
#endif #endif
bne a1, zero, 1f
# check cookie li a1, 1 # never return 0!
PTR_SUBU sp, FRAMESZ 1:
REG_S v1, GPOFF(sp) move v0, a1
REG_S ra, RAOFF(sp)
REG_S a1, A1OFF(sp)
move a0, t0
jal __bionic_setjmp_cookie_check
REG_L gp, GPOFF(sp)
REG_L ra, RAOFF(sp)
REG_L a1, A1OFF(sp)
PTR_ADDU sp, FRAMESZ
sltiu t0, a1, 1 # never return 0!
xor v0, a1, t0
j ra # return to setjmp call site j ra # return to setjmp call site
longjmp_botch:
jal longjmperror
jal abort
END(siglongjmp) END(siglongjmp)
ALIAS_SYMBOL(longjmp, siglongjmp) ALIAS_SYMBOL(longjmp, siglongjmp)

View File

@ -37,14 +37,6 @@ ENTRY(vfork)
.set noreorder .set noreorder
.cpload t9 .cpload t9
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
.set push
.set mips32r2
rdhwr v0, $29 // v0 = tls; kernel trap on mips32r1
.set pop
lw v0, REGSZ*1(v0) // v0 = v0[TLS_SLOT_THREAD_ID ie 1]
sw $0, REGSZ*2+4(v0) // v0->cached_pid_ = 0
li a0, (CLONE_VM | CLONE_VFORK | SIGCHLD) li a0, (CLONE_VM | CLONE_VFORK | SIGCHLD)
li a1, 0 li a1, 0
li a2, 0 li a2, 0

View File

@ -1,10 +1,43 @@
# 32-bit mips. # 32-bit mips.
#
# Default implementations of functions that are commonly optimized.
#
libc_bionic_src_files_mips += \ libc_bionic_src_files_mips += \
arch-mips/string/memcmp.c \ arch-mips/string/memcmp.c \
arch-mips/string/memcpy.S \ bionic/__memcpy_chk.cpp \
arch-mips/string/memset.S \ bionic/__memset_chk.cpp \
arch-mips/string/strcmp.S \ bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_mips += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
upstream-freebsd/lib/libc/string/wcscpy.c \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strcmp.c \
upstream-openbsd/lib/libc/string/strcpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
upstream-openbsd/lib/libc/string/strncat.c \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
# #
# Inherently architecture-specific code. # Inherently architecture-specific code.
@ -21,10 +54,14 @@ libc_bionic_src_files_mips += \
ifndef ARCH_MIPS_REV6 ifndef ARCH_MIPS_REV6
libc_bionic_src_files_mips += \ libc_bionic_src_files_mips += \
arch-mips/string/memcpy.S \
arch-mips/string/memset.S \
arch-mips/string/mips_strlen.c \ arch-mips/string/mips_strlen.c \
else else
libc_bionic_src_files_mips += \ libc_bionic_src_files_mips += \
arch-mips/string/memcpy.c \
arch-mips/string/memset.c \
arch-mips/string/strlen.c \ arch-mips/string/strlen.c \
endif endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,91 @@
/* $OpenBSD: memcpy.c,v 1.1 2014/11/30 19:43:56 deraadt Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Chris Torek.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <string.h>
#include <stdlib.h>
#include <syslog.h>
/*
* sizeof(word) MUST BE A POWER OF TWO
* SO THAT wmask BELOW IS ALL ONES
*/
typedef long word; /* "word" used for optimal copy speed */
#define wsize sizeof(word)
#define wmask (wsize - 1)
/*
* Copy a block of memory, not handling overlap.
*/
void *
memcpy(void *dst0, const void *src0, size_t length)
{
char *dst = dst0;
const char *src = src0;
size_t t;
if (length == 0 || dst == src) /* nothing to do */
goto done;
/*
* Macros: loop-t-times; and loop-t-times, t>0
*/
#define TLOOP(s) if (t) TLOOP1(s)
#define TLOOP1(s) do { s; } while (--t)
/*
* Copy forward.
*/
t = (long)src; /* only need low bits */
if ((t | (long)dst) & wmask) {
/*
* Try to align operands. This cannot be done
* unless the low bits match.
*/
if ((t ^ (long)dst) & wmask || length < wsize)
t = length;
else
t = wsize - (t & wmask);
length -= t;
TLOOP1(*dst++ = *src++);
}
/*
* Copy whole words, then mop up any trailing bytes.
*/
t = length / wsize;
TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
t = length & wmask;
TLOOP(*dst++ = *src++);
done:
return (dst0);
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 * Copyright (c) 2009
* MIPS Technologies, Inc., California. * MIPS Technologies, Inc., California.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -27,410 +27,216 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#ifdef __ANDROID__ /************************************************************************
*
* memset.S, version "64h" with 1 cache line horizon for "pref 30" and 14 nops
* Version: "043009"
*
************************************************************************/
/************************************************************************
* Include files
************************************************************************/
#include <private/bionic_asm.h> #include <private/bionic_asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC /*
# include <sysdep.h> * This routine could be optimized for MIPS64. The current code only
# include <regdef.h> * uses MIPS32 instructions.
# include <sys/asm.h> */
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB #if defined(__MIPSEB__)
# include "machine/asm.h" # define SWHI swl /* high part is left in big-endian */
# include "machine/regdef.h" # define SWLO swr /* low part is right in big-endian */
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE #endif
#if defined(__MIPSEL__)
# define SWHI swr /* high part is right in little-endian */
# define SWLO swl /* low part is left in little-endian */
#endif
#if !(defined(XGPROF) || defined(XPROF))
#undef SETUP_GP
#define SETUP_GP
#endif
#ifdef NDEBUG
#define DBG #
#else #else
# include <regdef.h> #define DBG
# include <sys/asm.h>
#endif #endif
/* Check to see if the MIPS architecture we are compiling for supports LEAF(memset,0)
prefetching. */
#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
# define USE_PREFETCH
# endif
#endif
#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
# define USE_DOUBLE
# endif
#endif
#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
# define DOUBLE_ALIGN
# endif
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
# define PTR_ADDIU addiu
# else
# define PTR_ADDIU daddiu
# endif
#endif
/* New R6 instructions that may not be in asm.h. */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABIO32
# define PTR_LSA lsa
# else
# define PTR_LSA dlsa
# endif
#endif
/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
or PREFETCH_STORE_STREAMED offers a large performance advantage
but PREPAREFORSTORE has some special restrictions to consider.
Prefetch with the 'prepare for store' hint does not copy a memory
location into the cache, it just allocates a cache line and zeros
it out. This means that if you do not write to the entire cache
line before writing it out to memory some data will get zero'ed out
when the cache line is written back to memory and data will be lost.
There are ifdef'ed sections of this memcpy to make sure that it does not
do prefetches on cache lines that are not going to be completely written.
This code is only needed and only used when PREFETCH_STORE_HINT is set to
PREFETCH_HINT_PREPAREFORSTORE. This code assumes that cache lines are
less than MAX_PREFETCH_SIZE bytes and if the cache line is larger it will
not work correctly. */
#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_PREPAREFORSTORE 30
/* If we have not picked out what hints to use at this point use the
standard load and store prefetch hints. */
# ifndef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif
/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
get 64 bytes in that case. The assumption is that each individual
prefetch brings in 32 bytes. */
# ifdef USE_DOUBLE
# define PREFETCH_CHUNK 64
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
# define PREFETCH_CHUNK 32
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
than PREFETCH_CHUNK, the assumed size of each prefetch. If the real size
of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
hint is used, the code will not work correctly. If PREPAREFORSTORE is not
used than MAX_PREFETCH_SIZE does not matter. */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
than 5 on a STORE prefetch and that a single prefetch can never be larger
than MAX_PREFETCH_SIZE. We add the extra 32 when USE_DOUBLE is set because
we actually do two prefetches in that case, one 32 bytes after the other. */
# ifdef USE_DOUBLE
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
&& ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
are before the buffer being copied. We start copies with an offset
of 4 so avoid this situation when using PREPAREFORSTORE. */
# error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif
#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
# undef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif
/* Allow the routine to be named something else if desired. */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
#endif
/* We load/store 64 bits at a time when USE_DOUBLE is true.
The C_ prefix stands for CHUNK and is used to avoid macro name
conflicts with system header files. */
#ifdef USE_DOUBLE
# define C_ST sd
# if __MIPSEB
# define C_STHI sdl /* high part is left in big-endian */
# else
# define C_STHI sdr /* high part is right in little-endian */
# endif
#else
# define C_ST sw
# if __MIPSEB
# define C_STHI swl /* high part is left in big-endian */
# else
# define C_STHI swr /* high part is right in little-endian */
# endif
#endif
/* Bookkeeping values for 32 vs. 64 bit mode. */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
#ifdef __ANDROID__
LEAF(MEMSET_NAME,0)
#else
LEAF(MEMSET_NAME)
#endif
.set nomips16
.set noreorder .set noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb). Regardless of .set noat
size, copy dst pointer to v0 for the return value. */
slti t2,a2,(2 * NSIZE)
bne t2,zero,L(lastb)
move v0,a0
/* If memset value is not zero, we copy it to all the bytes in a 32 or 64 addu t0,a0,a2 # t0 is the "past the end" address
bit word. */ slti AT,a2,4 # is a2 less than 4?
beq a1,zero,L(set0) /* If memset value is zero no smear */ bne AT,zero,.Llast4 # if yes, go to last4
PTR_SUBU a3,zero,a0 move v0,a0 # memset returns the dst pointer
beq a1,zero,.Lset0
subu v1,zero,a0
# smear byte into 32 bit word
#if (__mips==32) && (__mips_isa_rev>=2)
ins a1, a1, 8, 8 # Replicate fill byte into half-word.
ins a1, a1, 16, 16 # Replicate fill byte into word.
#else
and a1,0xff
sll AT,a1,8
or a1,AT
sll AT,a1,16
or a1,AT
#endif
.Lset0:
andi v1,v1,0x3 # word-unaligned address?
beq v1,zero,.Laligned # v1 is the unalignment count
subu a2,a2,v1
SWHI a1,0(a0)
addu a0,a0,v1
# Here we have the "word-aligned" a0 (until the "last4")
.Laligned:
andi t8,a2,0x3f # any 64-byte chunks?
# t8 is the byte count past 64-byte chunks
beq a2,t8,.Lchk8w # when a2==t8, no 64-byte chunks
# There will be at most 1 32-byte chunk then
subu a3,a2,t8 # subtract from a2 the reminder
# Here a3 counts bytes in 16w chunks
addu a3,a0,a3 # Now a3 is the final dst after 64-byte chunks
# Find out, if there are any 64-byte chunks after which will be still at least
# 96 bytes left. The value "96" is calculated as needed buffer for
# "pref 30,64(a0)" prefetch, which can be used as "pref 30,0(a0)" after
# incrementing "a0" by 64.
# For "a2" below 160 there will be no such "pref 30 safe" 64-byte chunk.
#
sltiu v1,a2,160
bgtz v1,.Lloop16w_nopref30 # skip "pref 30,0(a0)"
subu t7,a2,96 # subtract "pref 30 unsafe" region
# below we have at least 1 64-byte chunk which is "pref 30 safe"
andi t6,t7,0x3f # t6 is past "64-byte safe chunks" reminder
subu t5,t7,t6 # subtract from t7 the reminder
# Here t5 counts bytes in 16w "safe" chunks
addu t4,a0,t5 # Now t4 is the dst after 64-byte "safe" chunks
# Don't use "pref 30,0(a0)" for a0 in a "middle" of a cache line
# pref 30,0(a0)
# Here we are in the region, where it is safe to use "pref 30,64(a0)"
.Lloop16w:
addiu a0,a0,64
pref 30,-32(a0) # continue setting up the dest, addr 64-32
sw a1,-64(a0)
sw a1,-60(a0)
sw a1,-56(a0)
sw a1,-52(a0)
sw a1,-48(a0)
sw a1,-44(a0)
sw a1,-40(a0)
sw a1,-36(a0)
nop
nop # the extra nop instructions help to balance
nop # cycles needed for "store" + "fill" + "evict"
nop # For 64byte store there are needed 8 fill
nop # and 8 evict cycles, i.e. at least 32 instr.
nop
nop
pref 30,0(a0) # continue setting up the dest, addr 64-0
sw a1,-32(a0)
sw a1,-28(a0)
sw a1,-24(a0)
sw a1,-20(a0)
sw a1,-16(a0)
sw a1,-12(a0)
sw a1,-8(a0)
sw a1,-4(a0)
nop
nop
nop
nop # NOTE: adding 14 nop-s instead of 12 nop-s
nop # gives better results for "fast" memory
nop
bne a0,t4,.Lloop16w
nop nop
/* smear byte into 32 or 64 bit word */ beq a0,a3,.Lchk8w # maybe no more 64-byte chunks?
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2) nop # this "delayed slot" is useless ...
# ifdef USE_DOUBLE
dins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
dins a1, a1, 16, 16 /* Replicate fill byte into word. */
dins a1, a1, 32, 32 /* Replicate fill byte into dbl word. */
# else
ins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
ins a1, a1, 16, 16 /* Replicate fill byte into word. */
# endif
#else
# ifdef USE_DOUBLE
and a1,0xff
dsll t2,a1,8
or a1,t2
dsll t2,a1,16
or a1,t2
dsll t2,a1,32
or a1,t2
# else
and a1,0xff
sll t2,a1,8
or a1,t2
sll t2,a1,16
or a1,t2
# endif
#endif
/* If the destination address is not aligned do a partial store to get it .Lloop16w_nopref30: # there could be up to 3 "64-byte nopref30" chunks
aligned. If it is already aligned just jump to L(aligned). */ addiu a0,a0,64
L(set0): sw a1,-64(a0)
#ifndef R6_CODE sw a1,-60(a0)
andi t2,a3,(NSIZE-1) /* word-unaligned address? */ sw a1,-56(a0)
beq t2,zero,L(aligned) /* t2 is the unalignment count */ sw a1,-52(a0)
PTR_SUBU a2,a2,t2 sw a1,-48(a0)
C_STHI a1,0(a0) sw a1,-44(a0)
PTR_ADDU a0,a0,t2 sw a1,-40(a0)
#else /* R6_CODE */ sw a1,-36(a0)
andi t2,a0,(NSIZE-1) sw a1,-32(a0)
lapc t9,L(atable) sw a1,-28(a0)
PTR_LSA t9,t2,t9,2 sw a1,-24(a0)
jrc t9 sw a1,-20(a0)
L(atable): sw a1,-16(a0)
bc L(aligned) sw a1,-12(a0)
# ifdef USE_DOUBLE sw a1,-8(a0)
bc L(lb7) bne a0,a3,.Lloop16w_nopref30
bc L(lb6) sw a1,-4(a0)
bc L(lb5)
bc L(lb4)
# endif
bc L(lb3)
bc L(lb2)
bc L(lb1)
L(lb7):
sb a1,6(a0)
L(lb6):
sb a1,5(a0)
L(lb5):
sb a1,4(a0)
L(lb4):
sb a1,3(a0)
L(lb3):
sb a1,2(a0)
L(lb2):
sb a1,1(a0)
L(lb1):
sb a1,0(a0)
li t9,NSIZE .Lchk8w: # t8 here is the byte count past 64-byte chunks
subu t2,t9,t2
PTR_SUBU a2,a2,t2 andi t7,t8,0x1f # is there a 32-byte chunk?
PTR_ADDU a0,a0,t2 # the t7 is the reminder count past 32-bytes
#endif /* R6_CODE */ beq t8,t7,.Lchk1w # when t8==t7, no 32-byte chunk
move a2,t7
L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on a 16
byte boundry instead of an 8 byte boundry to maximize the opportunity
of proAptiv chips to do memory bonding (combining two sequential 4
byte stores into one 8 byte store). We know there are at least 4 bytes
left to store or we would have jumped to L(lastb) earlier in the code. */
#ifdef DOUBLE_ALIGN
andi t2,a3,4
beq t2,zero,L(double_aligned)
PTR_SUBU a2,a2,t2
sw a1,0(a0) sw a1,0(a0)
PTR_ADDU a0,a0,t2 sw a1,4(a0)
L(double_aligned): sw a1,8(a0)
#endif sw a1,12(a0)
sw a1,16(a0)
sw a1,20(a0)
sw a1,24(a0)
sw a1,28(a0)
addiu a0,a0,32
/* Now the destination is aligned to (word or double word) aligned address .Lchk1w:
Set a2 to count how many bytes we have to copy after all the 64/128 byte andi t8,a2,0x3 # now t8 is the reminder past 1w chunks
chunks are copied and a3 to the dest pointer after all the 64/128 byte beq a2,t8,.Llast4aligned
chunks have been copied. We will loop, incrementing a0 until it equals subu a3,a2,t8 # a3 is the count of bytes in 1w chunks
a3. */ addu a3,a0,a3 # now a3 is the dst address past the 1w chunks
andi t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
beq a2,t8,L(chkw) /* if a2==t8, no 64-byte/128-byte chunks */
PTR_SUBU a3,a2,t8 /* subtract from a2 the reminder */
PTR_ADDU a3,a0,a3 /* Now a3 is the final dst after loop */
/* When in the loop we may prefetch with the 'prepare to store' hint, # copying in words (4-byte chunks)
in this case the a0+x should not be past the "t0-32" address. This .LwordCopy_loop:
means: for x=128 the last "safe" a0 address is "t0-160". Alternatively, addiu a0,a0,4
for x=64 the last "safe" a0 address is "t0-96" In the current version we bne a0,a3,.LwordCopy_loop
will use "prefetch hint,128(a0)", so "t0-160" is the limit. */ sw a1,-4(a0)
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
PTR_ADDU t0,a0,a2 /* t0 is the "past the end" address */
PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
#endif
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
PREFETCH_FOR_STORE (1, a0)
PREFETCH_FOR_STORE (2, a0)
PREFETCH_FOR_STORE (3, a0)
#endif
L(loop16w): # store last 0-3 bytes
#if defined(USE_PREFETCH) \ # this will repeat the last store if the memset finishes on a word boundary
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) .Llast4aligned:
sltu v1,t9,a0 /* If a0 > t9 don't use next prefetch */ j ra
bgtz v1,L(skip_pref) SWLO a1,-1(t0)
nop
#endif
#ifndef R6_CODE
PREFETCH_FOR_STORE (4, a0)
PREFETCH_FOR_STORE (5, a0)
#else
PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
C_ST a1,UNIT(8)(a0)
C_ST a1,UNIT(9)(a0)
C_ST a1,UNIT(10)(a0)
C_ST a1,UNIT(11)(a0)
C_ST a1,UNIT(12)(a0)
C_ST a1,UNIT(13)(a0)
C_ST a1,UNIT(14)(a0)
C_ST a1,UNIT(15)(a0)
PTR_ADDIU a0,a0,UNIT(16) /* adding 64/128 to dest */
bne a0,a3,L(loop16w)
nop
move a2,t8
/* Here we have dest word-aligned but less than 64-bytes or 128 bytes to go. .Llast4:
Check for a 32(64) byte chunk and copy if if there is one. Otherwise beq a0,t0,.Llast4e
jump down to L(chk1w) to handle the tail end of the copy. */ .Llast4l:
L(chkw): addiu a0,a0,1
andi t8,a2,NSIZEMASK /* is there a 32-byte/64-byte chunk. */ bne a0,t0,.Llast4l
/* the t8 is the reminder count past 32-bytes */
beq a2,t8,L(chk1w)/* when a2==t8, no 32-byte chunk */
nop
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
PTR_ADDIU a0,a0,UNIT(8)
/* Here we have less than 32(64) bytes to set. Set up for a loop to
copy one word (or double word) at a time. Set a2 to count how many
bytes we have to copy after all the word (or double word) chunks are
copied and a3 to the dest pointer after all the (d)word chunks have
been copied. We will loop, incrementing a0 until a0 equals a3. */
L(chk1w):
andi a2,t8,(NSIZE-1) /* a2 is the reminder past one (d)word chunks */
beq a2,t8,L(lastb)
PTR_SUBU a3,t8,a2 /* a3 is count of bytes in one (d)word chunks */
PTR_ADDU a3,a0,a3 /* a3 is the dst address after loop */
/* copying in words (4-byte or 8 byte chunks) */
L(wordCopy_loop):
PTR_ADDIU a0,a0,UNIT(1)
bne a0,a3,L(wordCopy_loop)
C_ST a1,UNIT(-1)(a0)
/* Copy the last 8 (or 16) bytes */
L(lastb):
blez a2,L(leave)
PTR_ADDU a3,a0,a2 /* a3 is the last dst address */
L(lastbloop):
PTR_ADDIU a0,a0,1
bne a0,a3,L(lastbloop)
sb a1,-1(a0) sb a1,-1(a0)
L(leave): .Llast4e:
j ra j ra
nop nop
.set at .set at
.set reorder .set reorder
END(MEMSET_NAME)
#ifndef __ANDROID__ END(memset)
# ifdef _LIBC
libc_hidden_builtin_def (MEMSET_NAME)
# endif /************************************************************************
#endif * Implementation : Static functions
************************************************************************/

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2015 The Android Open Source Project * Copyright (C) 2008 The Android Open Source Project
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -25,16 +25,20 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#include <string.h>
#include <stdint.h>
#undef _FORTIFY_SOURCE void* memset(void* dst, int c, size_t n)
{
char* q = dst;
char* end = q + n;
#include <unistd.h> for (;;) {
#include "private/libc_logging.h" if (q >= end) break; *q++ = (char) c;
if (q >= end) break; *q++ = (char) c;
extern char* __getcwd_chk(char* buf, size_t len, size_t buflen) { if (q >= end) break; *q++ = (char) c;
if (__predict_false(len > buflen)) { if (q >= end) break; *q++ = (char) c;
__fortify_chk_fail("getcwd: prevented write past end of buffer", 0);
} }
return getcwd(buf, len); return dst;
} }

View File

@ -1,260 +0,0 @@
/*
* Copyright (c) 2014
* Imagination Technologies Limited.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY IMAGINATION TECHNOLOGIES LIMITED ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IMAGINATION TECHNOLOGIES LIMITED BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef __ANDROID__
# include <private/bionic_asm.h>
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
#else
# include <regdef.h>
# include <sys/asm.h>
#endif
/* Technically strcmp should not read past the end of the strings being
compared. We will read a full word that may contain excess bits beyond
the NULL string terminator but unless ENABLE_READAHEAD is set, we will not
read the next word after the end of string. Setting ENABLE_READAHEAD will
improve performance but is technically illegal based on the definition of
strcmp. */
#ifdef ENABLE_READAHEAD
# define DELAY_READ
#else
# define DELAY_READ nop
#endif
/* Testing on a little endian machine showed using CLZ was a
performance loss, so we are not turning it on by default. */
#if defined(ENABLE_CLZ) && (__mips_isa_rev > 1)
# define USE_CLZ
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
# define PTR_ADDIU addiu
# else
# define PTR_ADDIU daddiu
# endif
#endif
/* Allow the routine to be named something else if desired. */
#ifndef STRCMP_NAME
# define STRCMP_NAME strcmp
#endif
#ifdef __ANDROID__
LEAF(STRCMP_NAME, 0)
#else
LEAF(STRCMP_NAME)
#endif
.set nomips16
.set noreorder
or t0, a0, a1
andi t0,0x3
bne t0, zero, L(byteloop)
/* Both strings are 4 byte aligned at this point. */
lui t8, 0x0101
ori t8, t8, 0x0101
lui t9, 0x7f7f
ori t9, 0x7f7f
#define STRCMP32(OFFSET) \
lw v0, OFFSET(a0); \
lw v1, OFFSET(a1); \
subu t0, v0, t8; \
bne v0, v1, L(worddiff); \
nor t1, v0, t9; \
and t0, t0, t1; \
bne t0, zero, L(returnzero)
L(wordloop):
STRCMP32(0)
DELAY_READ
STRCMP32(4)
DELAY_READ
STRCMP32(8)
DELAY_READ
STRCMP32(12)
DELAY_READ
STRCMP32(16)
DELAY_READ
STRCMP32(20)
DELAY_READ
STRCMP32(24)
DELAY_READ
STRCMP32(28)
PTR_ADDIU a0, a0, 32
b L(wordloop)
PTR_ADDIU a1, a1, 32
L(returnzero):
j ra
move v0, zero
L(worddiff):
#ifdef USE_CLZ
subu t0, v0, t8
nor t1, v0, t9
and t1, t0, t1
xor t0, v0, v1
or t0, t0, t1
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
wsbh t0, t0
rotr t0, t0, 16
# endif
clz t1, t0
and t1, 0xf8
# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
neg t1
addu t1, 24
# endif
rotrv v0, v0, t1
rotrv v1, v1, t1
and v0, v0, 0xff
and v1, v1, 0xff
j ra
subu v0, v0, v1
#else /* USE_CLZ */
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
andi t0, v0, 0xff
beq t0, zero, L(wexit01)
andi t1, v1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 8
srl t9, v1, 8
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 16
srl t1, v1, 16
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 24
srl t9, v1, 24
# else /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
srl t0, v0, 24
beq t0, zero, L(wexit01)
srl t1, v1, 24
bne t0, t1, L(wexit01)
srl t8, v0, 16
srl t9, v1, 16
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 8
srl t1, v1, 8
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
andi t8, v0, 0xff
andi t9, v1, 0xff
# endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
L(wexit89):
j ra
subu v0, t8, t9
L(wexit01):
j ra
subu v0, t0, t1
#endif /* USE_CLZ */
/* It might seem better to do the 'beq' instruction between the two 'lbu'
instructions so that the nop is not needed but testing showed that this
code is actually faster (based on glibc strcmp test). */
#define BYTECMP01(OFFSET) \
lbu v0, OFFSET(a0); \
lbu v1, OFFSET(a1); \
beq v0, zero, L(bexit01); \
nop; \
bne v0, v1, L(bexit01)
#define BYTECMP89(OFFSET) \
lbu t8, OFFSET(a0); \
lbu t9, OFFSET(a1); \
beq t8, zero, L(bexit89); \
nop; \
bne t8, t9, L(bexit89)
L(byteloop):
BYTECMP01(0)
BYTECMP89(1)
BYTECMP01(2)
BYTECMP89(3)
BYTECMP01(4)
BYTECMP89(5)
BYTECMP01(6)
BYTECMP89(7)
PTR_ADDIU a0, a0, 8
b L(byteloop)
PTR_ADDIU a1, a1, 8
L(bexit01):
j ra
subu v0, v0, v1
L(bexit89):
j ra
subu v0, t8, t9
.set at
.set reorder
END(STRCMP_NAME)
#ifndef __ANDROID__
# ifdef _LIBC
libc_hidden_builtin_def (STRCMP_NAME)
# endif
#endif

Some files were not shown because too many files have changed in this diff Show More