Compare commits


28 Commits

Author SHA1 Message Date
Colin Cross
635ff5fdac Merge "Add more of the bionic architecture logic" into master-soong 2015-07-10 01:03:44 +00:00
Colin Cross
a56904273c Merge "Add version_script to bionic Blueprints" into master-soong 2015-07-10 01:00:49 +00:00
Colin Cross
f93bddbbd8 Merge "Add translation directives to libc Android.bp" into master-soong 2015-07-10 01:00:41 +00:00
Colin Cross
2803a15d4c Add more of the bionic architecture logic
Change-Id: I8aecabdc7219ad9e9affab1a3e67164352858bf9
2015-07-09 17:42:58 -07:00
Colin Cross
70f11bd69b Add version_script to bionic Blueprints
Change-Id: Ieddd80fa599c21414608e1d1c20ebaa2507a66ef
2015-07-09 17:35:15 -07:00
Colin Cross
8f7a4a3cb5 Add translation directives to libc Android.bp
Add translation directives for the crt*.o files instead of trying
to handle them in the translator.

Change-Id: I44a491f1823f483d9c40368da35d4e0cf16030f2
2015-07-09 17:35:07 -07:00
Dan Willemsen
649a2ea0e2 Merge "Switch libm from thumb to arm" into master-soong 2015-07-09 05:31:41 +00:00
Dan Willemsen
c371306714 Switch libm from thumb to arm
To match the Android.mk file

Change-Id: I02cb5f4b140c03bc8630879f005e027426a5dd99
2015-07-08 19:12:09 -07:00
Dan Willemsen
87e33892fc Merge "Use exclude_srcs instead of "-file"" into master-soong 2015-07-06 19:52:39 +00:00
Dan Willemsen
7454daa97f Use exclude_srcs instead of "-file"
Change-Id: Ie07c5901233d429102f9b6afcef12ea8c4bdda2c
2015-07-01 14:00:21 -07:00
Colin Cross
dab6ead2aa Rename Blueprints to Android.bp
Rename module definition files to Android.bp to avoid conflicts
with another project called Blueprint.

Change-Id: I69cfe9649fe35735dade6416d15b171a5bb2e283
2015-05-20 13:11:07 -07:00
Colin Cross
7d0b7b4ba2 Merge "Remove nonexistent include path" into master-soong 2015-05-12 19:31:16 +00:00
Colin Cross
98f4e07237 Remove nonexistent include path
upstream-freebsd/lib/libc/include doesn't exist; remove it from the
include path.

Change-Id: I0492784db5dc45e4a9a937956d095a147a08e835
2015-05-12 11:35:48 -07:00
Colin Cross
b624072cbd Merge "Export libbenchmark include dir" into master-soong 2015-05-08 00:06:32 +00:00
Colin Cross
0ecd342743 Export libbenchmark include dir
Export the libbenchmark include directory so the build system
doesn't have to manually add it for cc_benchmark modules.

Change-Id: I918a2fa5fb3104f4c2d86930ed2b9c5e00820ec6
2015-05-07 15:51:37 -07:00
Colin Cross
faa14d4de8 Merge "Fix Blueprints for building on Darwin" into master-soong 2015-05-07 21:31:49 +00:00
Colin Cross
abc97e2e71 Fix Blueprints for building on Darwin
Change-Id: I252e1b8a9ace397609f056f69aff83331b92aab7
2015-05-05 16:52:38 -07:00
Colin Cross
6549fe249c Update Blueprints files for AOSP changes
Change-Id: I915fc1e00b6e6eb1d6c08233893517b1d56c74fa
2015-04-29 11:34:24 -07:00
Colin Cross
c15e8fdb8d Merge remote-tracking branch 'aosp/master' into aosp 2015-04-29 11:29:05 -07:00
Colin Cross
90d6279802 Merge "Add Blueprints files for remaining bionic modules" into master-soong 2015-03-28 01:15:36 +00:00
Colin Cross
22d8776587 Add Blueprints files for remaining bionic modules
Change-Id: Ic9440fddb44ca1f17aad5b249535d7b96dd8d690
2015-03-27 11:14:39 -07:00
Colin Cross
51b8912253 Merge "Merge remote-tracking branch 'aosp/master' into HEAD" into master-soong 2015-03-17 19:31:44 +00:00
Colin Cross
270f2ea800 Merge remote-tracking branch 'aosp/master' into HEAD
Change-Id: Ia313444a62bcdeb676185b56ce730d0f997c8226
2015-03-17 12:30:41 -07:00
Colin Cross
7357ad0875 Merge "Update bionic Blueprints to match latest AOSP master" into master-soong 2015-03-17 00:44:29 +00:00
Colin Cross
959bc099a3 Merge remote-tracking branch 'aosp/master' into HEAD 2015-03-16 16:54:58 -07:00
Colin Cross
68a3b658b1 Update bionic Blueprints to match latest AOSP master
Change-Id: I90410ec60acfc3dcbdbcd0be6f283a90f4395643
2015-03-16 16:31:26 -07:00
Colin Cross
062d498e28 Merge "Initial bionic Blueprints files" into master-soong 2015-03-14 06:41:45 +00:00
Colin Cross
d2b8741e1b Initial bionic Blueprints files
Change-Id: Iafe8e84e0dc62e7d7c830e2c272ec92abdf6a801
2015-03-10 14:11:55 -07:00
236 changed files with 6759 additions and 7869 deletions

Android.bp (new file, 1 line)

@@ -0,0 +1 @@
subdirs = ["*"]
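
The wildcard in this one-line top-level Android.bp pulls in every subdirectory that carries its own Android.bp. Purely as an illustration, the explicit equivalent for the two directories that gain build files in this comparison would be:

// Illustrative only: the wildcard form above makes this explicit
// list unnecessary and picks up new subdirectories without edits.
subdirs = [
    "benchmarks",
    "libc",
]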


@@ -194,15 +194,14 @@ The tests are all built from the tests/ directory.
### Device tests
$ mma
$ adb remount
$ adb sync
$ adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests32
$ adb shell \
/data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static32
# Only for 64-bit targets
$ adb shell /data/nativetest64/bionic-unit-tests/bionic-unit-tests64
$ adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests64
$ adb shell \
/data/nativetest64/bionic-unit-tests-static/bionic-unit-tests-static64
/data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static64
### Host tests

benchmarks/Android.bp (new file, 123 lines)

@@ -0,0 +1,123 @@
//
// Copyright (C) 2013 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// Benchmarks library, usable by projects outside this directory.
// -----------------------------------------------------------------------------
benchmark_cflags = [
"-O2",
"-fno-builtin",
"-Wall",
"-Wextra",
"-Werror",
"-Wunused",
]
benchmark_cppflags = ["-std=gnu++11"]
benchmarklib_src_files = [
"Benchmark.cpp",
"utils.cpp",
"main.cpp",
]
cc_library_static {
name: "libbenchmark",
host_supported: true,
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
srcs: benchmarklib_src_files,
static_libs: ["libbase"],
export_include_dirs: ["."],
target: {
darwin: {
// Only supported on linux systems.
disabled: true,
},
},
}
// -----------------------------------------------------------------------------
// Benchmarks.
// -----------------------------------------------------------------------------
benchmark_src_files = [
"math_benchmark.cpp",
"property_benchmark.cpp",
"pthread_benchmark.cpp",
"semaphore_benchmark.cpp",
"stdio_benchmark.cpp",
"string_benchmark.cpp",
"time_benchmark.cpp",
"unistd_benchmark.cpp",
]
// Build benchmarks for the device (with bionic's .so). Run with:
// adb shell bionic-benchmarks32
// adb shell bionic-benchmarks64
cc_binary {
name: "bionic-benchmarks",
multilib: {
lib32: {
suffix: "32",
},
lib64: {
suffix: "64",
},
},
compile_multilib: "both",
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
srcs: benchmark_src_files,
static_libs: [
"libbenchmark",
"libbase",
],
}
// We don't build a static benchmark executable because it's not usually
// useful. If you're trying to run the current benchmarks on an older
// release, it's (so far at least) always because you want to measure the
// performance of the old release's libc, and a static benchmark isn't
// going to let you do that.
// Build benchmarks for the host (against glibc!). Run with:
cc_binary_host {
name: "bionic-benchmarks-glibc",
multilib: {
lib32: {
stem: "bionic-benchmarks-glibc32",
},
lib64: {
stem: "bionic-benchmarks-glibc64",
},
},
compile_multilib: "both",
cflags: benchmark_cflags,
cppflags: benchmark_cppflags,
ldflags: ["-lrt"],
srcs: benchmark_src_files,
static_libs: [
"libbenchmark",
"libbase",
],
target: {
darwin: {
// Only supported on linux systems.
disabled: true,
},
},
}
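
The export_include_dirs: ["."] property on libbenchmark above is what the "Export libbenchmark include dir" commit refers to: a module that lists libbenchmark in static_libs inherits the include path and can use #include <benchmark/Benchmark.h> (as the benchmark sources in this comparison do) without declaring the directory itself. A minimal sketch of such a dependent, with hypothetical module and source names:

cc_benchmark {
    // Hypothetical module, not part of this change.
    name: "example-benchmark",
    srcs: ["example_benchmark.cpp"],
    // No explicit include path for <benchmark/Benchmark.h>;
    // it is inherited from libbenchmark's export_include_dirs.
    static_libs: ["libbenchmark"],
}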


@@ -29,6 +29,7 @@ benchmark_cflags := \
-Wunused \
benchmark_cppflags := \
-std=gnu++11 \
benchmarklib_src_files := \
Benchmark.cpp \


@@ -16,7 +16,6 @@
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <benchmark/Benchmark.h>
@@ -74,7 +73,7 @@ static void FopenFgetsFclose(int iters, bool no_locking) {
for (int i = 0; i < iters; ++i) {
FILE* fp = fopen("/proc/version", "re");
if (no_locking) __fsetlocking(fp, FSETLOCKING_BYCALLER);
if (fgets(buf, sizeof(buf), fp) == nullptr) abort();
fgets(buf, sizeof(buf), fp);
fclose(fp);
}
}


@@ -17,7 +17,6 @@
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <benchmark/Benchmark.h>


@@ -14,8 +14,6 @@
* limitations under the License.
*/
#include "utils.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>
@@ -23,7 +21,7 @@
#include <string>
#include <base/stringprintf.h>
#include "utils.h"
int Round(int n) {
int base = 1;
@@ -74,7 +72,10 @@ std::string PrettyInt(long value, size_t base) {
break;
}
}
return android::base::StringPrintf("%s%" PRId64 "%s",
negative_number ? "-" : "",
count / kAmountPerUnit[i], kUnitStrings[i]);
char* s = NULL;
asprintf(&s, "%s%" PRId64 "%s", (negative_number ? "-" : ""),
count / kAmountPerUnit[i], kUnitStrings[i]);
std::string result(s);
free(s);
return result;
}

libc/Android.bp (new file, 2261 lines): diff suppressed because it is too large.
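
The converted libc build file itself is suppressed, but the commit messages and the libc/Android.mk diff that follows indicate its rough shape. The sketch below is hypothetical throughout; none of its lines are taken from the suppressed file:

// Hypothetical sketch of the suppressed libc/Android.bp.
cc_library {
    name: "libc",
    // Flags mirrored from libc_common_cflags in the Android.mk
    // diff below.
    cflags: [
        "-D_LIBC=1",
        "-Wall", "-Wextra", "-Wunused",
    ],
    // Property named by the "Add version_script to bionic Blueprints"
    // commit; mirrors the libc.map to version_script.txt switch in
    // the makefile diff.
    version_script: "version_script.txt",
    // Per-architecture branches, per the "Add more of the bionic
    // architecture logic" commit (contents assumed).
    arch: {
        arm: {
            srcs: ["arch-arm/bionic/syscall.S"],
        },
    },
}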


@@ -70,9 +70,6 @@ libc_common_src_files := \
libc_common_src_files += \
bionic/__FD_chk.cpp \
bionic/__fgets_chk.cpp \
bionic/__fread_chk.cpp \
bionic/__fwrite_chk.cpp \
bionic/__getcwd_chk.cpp \
bionic/__memchr_chk.cpp \
bionic/__memmove_chk.cpp \
bionic/__memrchr_chk.cpp \
@@ -132,12 +129,8 @@ libc_bionic_ndk_src_files := \
bionic/fchmod.cpp \
bionic/fchmodat.cpp \
bionic/ffs.cpp \
bionic/fgetxattr.cpp \
bionic/flistxattr.cpp \
bionic/flockfile.cpp \
bionic/fpclassify.cpp \
bionic/fsetxattr.cpp \
bionic/ftruncate.cpp \
bionic/futimens.cpp \
bionic/getcwd.cpp \
bionic/gethostname.cpp \
@@ -553,16 +546,14 @@ libc_thread_atexit_impl_src_files := \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.cpp \
# Various kinds of cruft.
# Various kinds of LP32 cruft.
# ========================================================
libc_common_src_files += \
bionic/ndk_cruft.cpp \
libc_bionic_ndk_src_files_32 += \
libc_bionic_src_files_32 += \
bionic/mmap.cpp \
libc_common_src_files_32 += \
bionic/legacy_32_bit_support.cpp \
bionic/ndk_cruft.cpp \
bionic/time64.c \
libc_netbsd_src_files_32 += \
@@ -578,14 +569,11 @@ libc_common_cflags := \
-D_LIBC=1 \
-Wall -Wextra -Wunused \
use_clang := $(USE_CLANG_PLATFORM_BUILD)
# Clang/llvm has incompatible long double (fp128) for x86_64.
# https://llvm.org/bugs/show_bug.cgi?id=23897
ifeq ($(TARGET_ARCH),x86_64)
use_clang := false
ifneq ($(TARGET_USES_LOGD),false)
libc_common_cflags += -DTARGET_USES_LOGD
endif
use_clang := $(USE_CLANG_PLATFORM_BUILD)
ifeq ($(use_clang),)
use_clang := false
endif
@@ -623,6 +611,7 @@ libc_common_conlyflags := \
# Define some common cppflags
libc_common_cppflags := \
-std=gnu++11
# Define some common includes
# ========================================================
@@ -663,7 +652,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -704,7 +693,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -750,7 +739,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -783,7 +772,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -818,7 +807,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -859,7 +848,7 @@ LOCAL_MODULE := libc_openbsd_ndk
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -897,7 +886,7 @@ LOCAL_MODULE := libc_openbsd
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -935,7 +924,7 @@ LOCAL_MODULE := libc_gdtoa
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -952,6 +941,10 @@ LOCAL_SRC_FILES := $(libc_bionic_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
@@ -960,11 +953,10 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_src_files))
include $(BUILD_STATIC_LIBRARY)
@@ -980,6 +972,10 @@ LOCAL_SRC_FILES := $(libc_bionic_ndk_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
@@ -988,11 +984,11 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_ndk_src_files))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_src_files))
include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
@@ -1009,7 +1005,7 @@ LOCAL_CLANG := false
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
@@ -1028,6 +1024,10 @@ LOCAL_SRC_FILES := $(libc_pthread_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes)
@@ -1036,7 +1036,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
@@ -1060,7 +1060,7 @@ LOCAL_CLANG := true # GCC refuses to hide new/delete
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
# b/17574078: Need to disable coverage until we have a prebuilt libprofile_rt.
# Since this is a static library built with clang, it needs to link
# libprofile_rt when it is linked into the final binary. Since the final binary
@@ -1088,7 +1088,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
@@ -1110,7 +1110,7 @@ LOCAL_CFLAGS := $(libc_common_cflags) -fno-builtin
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
@@ -1136,7 +1136,7 @@ LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CFLAGS := $(libc_common_cflags) -fvisibility=hidden -O0
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
LOCAL_SYSTEM_SHARED_LIBRARIES :=
@@ -1220,7 +1220,7 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
# TODO: split out the asflags.
LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -1258,7 +1258,7 @@ LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -1281,7 +1281,7 @@ LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_malloc
LOCAL_CLANG := $(use_clang)
LOCAL_CXX_STL := none
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
include $(BUILD_STATIC_LIBRARY)
@@ -1314,7 +1314,7 @@ endif
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -1346,7 +1346,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_REQUIRED_MODULES := tzdata
LOCAL_ADDITIONAL_DEPENDENCIES := \
$(libc_common_additional_dependencies) \
$(LOCAL_PATH)/libc.map \
$(LOCAL_PATH)/version_script.txt \
# Leave the symbols in the shared library so that stack unwinders can produce
# meaningful name resolution.
@@ -1374,16 +1374,16 @@ LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
# Don't re-export new/delete and friends, even if the compiler really wants to.
LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/libc.map
LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
# We'd really like to do this for all architectures, but since this wasn't done
# before, these symbols must continue to be exported on LP32 for binary
# compatibility.
LOCAL_LDFLAGS_64 := -Wl,--exclude-libs,libgcc.a
# TODO: disabled for http://b/20065774.
#LOCAL_LDFLAGS_64 := -Wl,--exclude-libs,libgcc.a
# TODO: This is to work around b/19059885. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=both
LOCAL_LDFLAGS_x86 := -Wl,--hash-style=both
LOCAL_LDFLAGS_arm := -Wl,--hash-style=sysv
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files))
@@ -1396,7 +1396,7 @@ LOCAL_CFLAGS_arm += -DCRT_LEGACY_WORKAROUND
LOCAL_SRC_FILES_arm += \
arch-arm/bionic/atexit_legacy.c
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_SHARED_LIBRARY)
@@ -1450,7 +1450,7 @@ LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
# Don't install on release build
LOCAL_MODULE_TAGS := eng debug
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -1490,7 +1490,7 @@ LOCAL_LDFLAGS := -Wl,--version-script,$(LOCAL_PATH)/version_script.txt
# Don't install on release build
LOCAL_MODULE_TAGS := eng debug
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
@@ -1513,15 +1513,14 @@ LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
# TODO: This is to work around b/19059885. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=both
LOCAL_LDFLAGS_x86 := -Wl,--hash-style=both
LOCAL_LDFLAGS_arm := -Wl,--hash-style=sysv
LOCAL_SRC_FILES := $(libstdcxx_common_src_files)
LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := libc
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_SHARED_LIBRARY)
@@ -1537,7 +1536,7 @@ LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES := libc
LOCAL_SANITIZE := never
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
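
The LOCAL_SANITIZE := never and LOCAL_ADDRESS_SANITIZER := false lines that recur throughout this makefile diff are two generations of the same per-module sanitizer opt-out on the make side. A sketch of the Blueprint counterpart follows; the sanitize property shape is assumed (it does not appear anywhere in this comparison) and the module name is hypothetical:

cc_library_static {
    name: "libc_example", // hypothetical
    sanitize: {
        // Assumed Soong counterpart of LOCAL_SANITIZE := never.
        never: true,
    },
}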


@@ -307,6 +307,34 @@ SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (C) 2008-2010 The Android Open Source Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (C) 2009 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
@@ -426,6 +454,22 @@ Android adaptation and tweak by Jim Huang <jserv@0xlab.org>.
-------------------------------------------------------------------
Copyright (C) 2011 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------
Copyright (C) 2011 The Android Open Source Project
All rights reserved.
@@ -644,50 +688,6 @@ SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (C) 2015 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------
Copyright (C) 2015 The Android Open Source Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 1980, 1983, 1988, 1993
The Regents of the University of California. All rights reserved.
@@ -2550,6 +2550,33 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright (c) 1995, 1996 Carnegie-Mellon University.
All rights reserved.
Author: Chris G. Demetriou
Permission to use, copy, modify and distribute this software and
its documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the
software, derivative works or modified versions, and any portions
thereof, and that both notices appear in supporting documentation.
CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
Carnegie Mellon requests users of this software to return to
Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
School of Computer Science
Carnegie Mellon University
Pittsburgh PA 15213-3890
any improvements or extensions that they make and grant Carnegie the
rights to redistribute these changes.
-------------------------------------------------------------------
Copyright (c) 1996 by Internet Software Consortium.
Permission to use, copy, modify, and distribute this software for any
@@ -3768,22 +3795,6 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-------------------------------------------------------------------
Copyright (c) 2007 Todd C. Miller <Todd.Miller@courtesan.com>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-------------------------------------------------------------------
Copyright (c) 2007-2008 Michael G Schwern
This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
@@ -4722,6 +4733,31 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright 2000 David E. O'Brien, John D. Polstra.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------
Copyright 2008 Android Open Source Project (source port randomization)
Copyright (c) 1985, 1989, 1993
The Regents of the University of California. All rights reserved.


@@ -77,6 +77,7 @@ int prlimit64(pid_t, int, struct rlimit64*, const struct rlimit64*) arm,mips,x8
int setgroups:setgroups32(int, const gid_t*) arm,x86
int setgroups:setgroups(int, const gid_t*) arm64,mips,mips64,x86_64
int setpgid(pid_t, pid_t) all
pid_t vfork(void) arm
int setregid:setregid32(gid_t, gid_t) arm,x86
int setregid:setregid(gid_t, gid_t) arm64,mips,mips64,x86_64
int chroot(const char*) all
@@ -121,9 +122,9 @@ int fdatasync(int) all
int fchown:fchown32(int, uid_t, gid_t) arm,x86
int fchown:fchown(int, uid_t, gid_t) arm64,mips,mips64,x86_64
void sync(void) all
int ___fsetxattr:fsetxattr(int, const char*, const void*, size_t, int) all
ssize_t ___fgetxattr:fgetxattr(int, const char*, void*, size_t) all
ssize_t ___flistxattr:flistxattr(int, char*, size_t) all
int fsetxattr(int, const char*, const void*, size_t, int) all
ssize_t fgetxattr(int, const char*, void*, size_t) all
ssize_t flistxattr(int, char*, size_t) all
int fremovexattr(int, const char*) all
int __getdents64:getdents64(unsigned int, struct dirent*, unsigned int) arm,arm64,mips,mips64,x86,x86_64
@@ -150,6 +151,7 @@ int utimensat(int, const char*, const struct timespec times[2], int) all
off_t lseek(int, off_t, int) arm,mips,x86
int __llseek:_llseek(int, unsigned long, unsigned long, off64_t*, int) arm,mips,x86
off_t lseek|lseek64(int, off_t, int) arm64,mips64,x86_64
int ftruncate(int, off_t) arm,mips,x86
int ftruncate64(int, off64_t) arm,mips,x86
int ftruncate|ftruncate64(int, off_t) arm64,mips64,x86_64
ssize_t sendfile(int out_fd, int in_fd, off_t* offset, size_t count) arm,mips,x86
@@ -202,7 +204,7 @@ clock_t times(struct tms*) all
int nanosleep(const struct timespec*, struct timespec*) all
int clock_settime(clockid_t, const struct timespec*) all
int clock_getres(clockid_t, struct timespec*) all
int ___clock_nanosleep:clock_nanosleep(clockid_t, int, const struct timespec*, struct timespec*) all
int __clock_nanosleep:clock_nanosleep(clockid_t, int, const struct timespec*, struct timespec*) all
int getitimer(int, const struct itimerval*) all
int setitimer(int, const struct itimerval*, struct itimerval*) all
int __timer_create:timer_create(clockid_t clockid, struct sigevent* evp, __kernel_timer_t* timerid) all
@@ -221,7 +223,7 @@ int __rt_sigpending:rt_sigpending(sigset_t*, size_t) all
int __rt_sigprocmask:rt_sigprocmask(int, const sigset_t*, sigset_t*, size_t) all
int __rt_sigsuspend:rt_sigsuspend(const sigset_t*, size_t) all
int __rt_sigtimedwait:rt_sigtimedwait(const sigset_t*, struct siginfo_t*, struct timespec_t*, size_t) all
int ___rt_sigqueueinfo:rt_sigqueueinfo(pid_t, int, siginfo_t*) all
int __rt_sigqueueinfo:rt_sigqueueinfo(pid_t, int, siginfo_t*) all
int __signalfd4:signalfd4(int, const sigset_t*, size_t, int) all
# sockets
@@ -307,9 +309,6 @@ int inotify_rm_watch(int, unsigned int) all
int __pselect6:pselect6(int, fd_set*, fd_set*, fd_set*, timespec*, void*) all
int __ppoll:ppoll(pollfd*, unsigned int, timespec*, const sigset_t*, size_t) all
ssize_t process_vm_readv(pid_t, const struct iovec*, unsigned long, const struct iovec*, unsigned long, unsigned long) all
ssize_t process_vm_writev(pid_t, const struct iovec*, unsigned long, const struct iovec*, unsigned long, unsigned long) all
int __set_tid_address:set_tid_address(int*) all
int setfsgid(gid_t) all
@@ -332,7 +331,7 @@ int __set_tls:set_thread_area(void*) mips,mips64
int __set_thread_area:set_thread_area(void*) x86
# vdso stuff.
int clock_gettime(clockid_t, timespec*) arm,mips,mips64
int __clock_gettime:clock_gettime(clockid_t, timespec*) arm64,x86,x86_64
int gettimeofday(timeval*, timezone*) arm,mips,mips64
int __gettimeofday:gettimeofday(timeval*, timezone*) arm64,x86,x86_64
int clock_gettime(clockid_t, timespec*) arm,mips,mips64,x86
int __clock_gettime:clock_gettime(clockid_t, timespec*) arm64,x86_64
int gettimeofday(timeval*, timezone*) arm,mips,mips64,x86
int __gettimeofday:gettimeofday(timeval*, timezone*) arm64,x86_64
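
Each line of this syscall list follows the pattern return-type name[|alias][:kernel-name](arguments) architecture-list. The optional :kernel-name maps a differently named bionic stub onto a raw kernel syscall, and underscore-prefixed stub names stay private to libc, to be wrapped by C++ sources such as the bionic/fgetxattr.cpp and bionic/fsetxattr.cpp entries in the libc file lists earlier in this comparison. One line from the hunk above, annotated:

# return  bionic-name:kernel-syscall(arguments)              architectures
ssize_t ___fgetxattr:fgetxattr(int, const char*, void*, size_t) all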


@@ -39,10 +39,10 @@ libc_bionic_src_files_arm += \
arch-arm/bionic/__bionic_clone.S \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/libgcc_protect_unwind.c \
arch-arm/bionic/__restore.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/syscall.S \
arch-arm/bionic/vfork.S \
libc_arch_static_src_files_arm := arch-arm/bionic/exidx_static.c
libc_arch_dynamic_src_files_arm := arch-arm/bionic/exidx_dynamic.c


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (C) 2008-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@@ -0,0 +1,93 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO: This file should go away once the unwinder migration to libc++.so is complete.
extern char _Unwind_Backtrace __attribute((visibility("protected")));
extern char __gnu_Unwind_Find_exidx __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP_D __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_VFP_D_16_to_31 __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_WMMXD __attribute((visibility("protected")));
extern char __gnu_Unwind_Restore_WMMXC __attribute((visibility("protected")));
extern char _Unwind_GetCFA __attribute((visibility("protected")));
extern char __gnu_Unwind_RaiseException __attribute((visibility("protected")));
extern char __gnu_Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char __gnu_Unwind_Resume __attribute((visibility("protected")));
extern char __gnu_Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char _Unwind_Complete __attribute((visibility("protected")));
extern char _Unwind_DeleteException __attribute((visibility("protected")));
extern char _Unwind_VRS_Get __attribute((visibility("protected")));
extern char _Unwind_VRS_Set __attribute((visibility("protected")));
extern char __gnu_Unwind_Backtrace __attribute((visibility("protected")));
extern char _Unwind_VRS_Pop __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP_D __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_VFP_D_16_to_31 __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_WMMXD __attribute((visibility("protected")));
extern char __gnu_Unwind_Save_WMMXC __attribute((visibility("protected")));
extern char ___Unwind_RaiseException __attribute((visibility("protected")));
extern char _Unwind_RaiseException __attribute((visibility("protected")));
extern char ___Unwind_Resume __attribute((visibility("protected")));
extern char _Unwind_Resume __attribute((visibility("protected")));
extern char ___Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char _Unwind_Resume_or_Rethrow __attribute((visibility("protected")));
extern char ___Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char _Unwind_ForcedUnwind __attribute((visibility("protected")));
extern char ___Unwind_Backtrace __attribute((visibility("protected")));
extern char _Unwind_GetRegionStart __attribute((visibility("protected")));
extern char _Unwind_GetLanguageSpecificData __attribute((visibility("protected")));
extern char _Unwind_GetDataRelBase __attribute((visibility("protected")));
extern char _Unwind_GetTextRelBase __attribute((visibility("protected")));
void* __bionic_libgcc_unwind_symbols[] = {
&_Unwind_Backtrace,
&__gnu_Unwind_Find_exidx,
&__gnu_Unwind_Restore_VFP_D,
&__gnu_Unwind_Restore_VFP,
&__gnu_Unwind_Restore_VFP_D_16_to_31,
&__gnu_Unwind_Restore_WMMXD,
&__gnu_Unwind_Restore_WMMXC,
&_Unwind_GetCFA,
&__gnu_Unwind_RaiseException,
&__gnu_Unwind_ForcedUnwind,
&__gnu_Unwind_Resume,
&__gnu_Unwind_Resume_or_Rethrow,
&_Unwind_Complete,
&_Unwind_DeleteException,
&_Unwind_VRS_Get,
&_Unwind_VRS_Set,
&__gnu_Unwind_Backtrace,
&_Unwind_VRS_Pop,
&__gnu_Unwind_Save_VFP_D,
&__gnu_Unwind_Save_VFP,
&__gnu_Unwind_Save_VFP_D_16_to_31,
&__gnu_Unwind_Save_WMMXD,
&__gnu_Unwind_Save_WMMXC,
&___Unwind_RaiseException,
&_Unwind_RaiseException,
&___Unwind_Resume,
&_Unwind_Resume,
&___Unwind_Resume_or_Rethrow,
&_Unwind_Resume_or_Rethrow,
&___Unwind_ForcedUnwind,
&_Unwind_ForcedUnwind,
&___Unwind_Backtrace,
&_Unwind_GetRegionStart,
&_Unwind_GetLanguageSpecificData,
&_Unwind_GetDataRelBase,
&_Unwind_GetTextRelBase,
};


@@ -1,46 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
ENTRY(vfork)
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
mrc p15, 0, r3, c13, c0, 3
ldr r3, [r3, #4]
mov r0, #0
str r0, [r3, #12]
mov ip, r7
ldr r7, =__NR_vfork
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(vfork)


@@ -70,7 +70,7 @@
.macro m_scan_byte
ldrb r3, [r0]
cbz r3, .L_strcat_r0_scan_done
cbz r3, strcat_r0_scan_done
add r0, #1
.endm // m_scan_byte
@@ -84,10 +84,10 @@ ENTRY(strcat)
// Quick check to see if src is empty.
ldrb r2, [r1]
pld [r1, #0]
cbnz r2, .L_strcat_continue
cbnz r2, strcat_continue
bx lr
.L_strcat_continue:
strcat_continue:
// To speed up really small dst strings, unroll checking the first 4 bytes.
m_push
m_scan_byte
@@ -96,95 +96,95 @@ ENTRY(strcat)
m_scan_byte
ands r3, r0, #7
beq .L_strcat_mainloop
beq strcat_mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_strcat_align_to_32
beq strcat_align_to_32
ldrb r5, [r0]
cbz r5, .L_strcat_r0_scan_done
cbz r5, strcat_r0_scan_done
add r0, r0, #1
.L_strcat_align_to_32:
bcc .L_strcat_align_to_64
strcat_align_to_32:
bcc strcat_align_to_64
ldrb r2, [r0]
cbz r2, .L_strcat_r0_scan_done
cbz r2, strcat_r0_scan_done
add r0, r0, #1
ldrb r4, [r0]
cbz r4, .L_strcat_r0_scan_done
cbz r4, strcat_r0_scan_done
add r0, r0, #1
.L_strcat_align_to_64:
strcat_align_to_64:
tst r3, #4
beq .L_strcat_mainloop
beq strcat_mainloop
ldr r3, [r0], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcat_zero_in_second_register
b .L_strcat_mainloop
bne strcat_zero_in_second_register
b strcat_mainloop
.L_strcat_r0_scan_done:
strcat_r0_scan_done:
// For short copies, hard-code checking the first 8 bytes since this
// new code doesn't win until after about 8 bytes.
m_copy_byte reg=r2, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=.L_strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=.L_strcpy_continue
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
.L_strcpy_finish:
strcpy_finish:
m_pop
.L_strcpy_continue:
strcpy_continue:
ands r3, r0, #7
beq .L_strcpy_check_src_align
beq strcpy_check_src_align
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_strcpy_align_to_32
beq strcpy_align_to_32
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
cbz r2, strcpy_complete
.L_strcpy_align_to_32:
bcc .L_strcpy_align_to_64
strcpy_align_to_32:
bcc strcpy_align_to_64
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
cbz r2, strcpy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .L_strcpy_complete
cbz r2, strcpy_complete
.L_strcpy_align_to_64:
strcpy_align_to_64:
tst r3, #4
beq .L_strcpy_check_src_align
beq strcpy_check_src_align
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
str r2, [r0], #4
.L_strcpy_check_src_align:
strcpy_check_src_align:
// At this point dst is aligned to a double word; check whether src
// is also aligned to a double word.
ands r3, r1, #7
bne .L_strcpy_unaligned_copy
bne strcpy_unaligned_copy
.p2align 2
.L_strcpy_mainloop:
strcpy_mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
@@ -192,128 +192,128 @@ ENTRY(strcat)
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_mainloop
b strcpy_mainloop
.L_strcpy_complete:
strcpy_complete:
m_pop
.L_strcpy_zero_in_first_register:
strcpy_zero_in_first_register:
lsls lr, ip, #17
bne .L_strcpy_copy1byte
bcs .L_strcpy_copy2bytes
bne strcpy_copy1byte
bcs strcpy_copy2bytes
lsls ip, ip, #1
bne .L_strcpy_copy3bytes
bne strcpy_copy3bytes
.L_strcpy_copy4bytes:
strcpy_copy4bytes:
// Copy 4 bytes to the destination.
str r2, [r0]
m_pop
.L_strcpy_copy1byte:
strcpy_copy1byte:
strb r2, [r0]
m_pop
.L_strcpy_copy2bytes:
strcpy_copy2bytes:
strh r2, [r0]
m_pop
.L_strcpy_copy3bytes:
strcpy_copy3bytes:
strh r2, [r0], #2
lsr r2, #16
strb r2, [r0]
m_pop
.L_strcpy_zero_in_second_register:
strcpy_zero_in_second_register:
lsls lr, ip, #17
bne .L_strcpy_copy5bytes
bcs .L_strcpy_copy6bytes
bne strcpy_copy5bytes
bcs strcpy_copy6bytes
lsls ip, ip, #1
bne .L_strcpy_copy7bytes
bne strcpy_copy7bytes
// Copy 8 bytes to the destination.
strd r2, r3, [r0]
m_pop
.L_strcpy_copy5bytes:
strcpy_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
m_pop
.L_strcpy_copy6bytes:
strcpy_copy6bytes:
str r2, [r0], #4
strh r3, [r0]
m_pop
.L_strcpy_copy7bytes:
strcpy_copy7bytes:
str r2, [r0], #4
strh r3, [r0], #2
lsr r3, #16
strb r3, [r0]
m_pop
.L_strcpy_unaligned_copy:
strcpy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary.
tbb [pc, r3]
.L_strcpy_unaligned_branchtable:
strcpy_unaligned_branchtable:
.byte 0
.byte ((.L_strcpy_unalign7 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign6 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign5 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign4 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign3 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign2 - .L_strcpy_unaligned_branchtable)/2)
.byte ((.L_strcpy_unalign1 - .L_strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
.p2align 2
// Can read 7 bytes before possibly crossing a page.
.L_strcpy_unalign7:
strcpy_unalign7:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
ldrb r3, [r1]
cbz r3, .L_strcpy_unalign7_copy5bytes
cbz r3, strcpy_unalign7_copy5bytes
ldrb r4, [r1, #1]
cbz r4, .L_strcpy_unalign7_copy6bytes
cbz r4, strcpy_unalign7_copy6bytes
ldrb r5, [r1, #2]
cbz r5, .L_strcpy_unalign7_copy7bytes
cbz r5, strcpy_unalign7_copy7bytes
ldr r3, [r1], #4
pld [r1, #64]
lsrs ip, r3, #24
strd r2, r3, [r0], #8
beq .L_strcpy_unalign_return
b .L_strcpy_unalign7
beq strcpy_unalign_return
b strcpy_unalign7
.L_strcpy_unalign7_copy5bytes:
strcpy_unalign7_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
.L_strcpy_unalign_return:
strcpy_unalign_return:
m_pop
.L_strcpy_unalign7_copy6bytes:
strcpy_unalign7_copy6bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0], #1
m_pop
.L_strcpy_unalign7_copy7bytes:
strcpy_unalign7_copy7bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0], #1
@@ -322,41 +322,41 @@ ENTRY(strcat)
.p2align 2
// Can read 6 bytes before possibly crossing a page.
.L_strcpy_unalign6:
strcpy_unalign6:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .L_strcpy_unalign_copy5bytes
cbz r4, strcpy_unalign_copy5bytes
ldrb r5, [r1, #1]
cbz r5, .L_strcpy_unalign_copy6bytes
cbz r5, strcpy_unalign_copy6bytes
ldr r3, [r1], #4
pld [r1, #64]
tst r3, #0xff0000
beq .L_strcpy_copy7bytes
beq strcpy_copy7bytes
lsrs ip, r3, #24
strd r2, r3, [r0], #8
beq .L_strcpy_unalign_return
b .L_strcpy_unalign6
beq strcpy_unalign_return
b strcpy_unalign6
.p2align 2
// Can read 5 bytes before possibly crossing a page.
.L_strcpy_unalign5:
strcpy_unalign5:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .L_strcpy_unalign_copy5bytes
cbz r4, strcpy_unalign_copy5bytes
ldr r3, [r1], #4
@@ -365,17 +365,17 @@ ENTRY(strcat)
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_unalign5
b strcpy_unalign5
.L_strcpy_unalign_copy5bytes:
strcpy_unalign_copy5bytes:
str r2, [r0], #4
strb r4, [r0]
m_pop
.L_strcpy_unalign_copy6bytes:
strcpy_unalign_copy6bytes:
str r2, [r0], #4
strb r4, [r0], #1
strb r5, [r0]
@@ -383,13 +383,13 @@ ENTRY(strcat)
.p2align 2
// Can read 4 bytes before possibly crossing a page.
.L_strcpy_unalign4:
strcpy_unalign4:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
ldr r3, [r1], #4
pld [r1, #64]
@@ -397,20 +397,20 @@ ENTRY(strcat)
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_unalign4
b strcpy_unalign4
.p2align 2
// Can read 3 bytes before possibly crossing a page.
.L_strcpy_unalign3:
strcpy_unalign3:
ldrb r2, [r1]
cbz r2, .L_strcpy_unalign3_copy1byte
cbz r2, strcpy_unalign3_copy1byte
ldrb r3, [r1, #1]
cbz r3, .L_strcpy_unalign3_copy2bytes
cbz r3, strcpy_unalign3_copy2bytes
ldrb r4, [r1, #2]
cbz r4, .L_strcpy_unalign3_copy3bytes
cbz r4, strcpy_unalign3_copy3bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
@@ -418,26 +418,26 @@ ENTRY(strcat)
pld [r1, #64]
lsrs lr, r2, #24
beq .L_strcpy_copy4bytes
beq strcpy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_unalign3
b strcpy_unalign3
.L_strcpy_unalign3_copy1byte:
strcpy_unalign3_copy1byte:
strb r2, [r0]
m_pop
.L_strcpy_unalign3_copy2bytes:
strcpy_unalign3_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_pop
.L_strcpy_unalign3_copy3bytes:
strcpy_unalign3_copy3bytes:
strb r2, [r0], #1
strb r3, [r0], #1
strb r4, [r0]
@@ -445,34 +445,34 @@ ENTRY(strcat)
.p2align 2
// Can read 2 bytes before possibly crossing a page.
.L_strcpy_unalign2:
strcpy_unalign2:
ldrb r2, [r1]
cbz r2, .L_strcpy_unalign_copy1byte
cbz r2, strcpy_unalign_copy1byte
ldrb r4, [r1, #1]
cbz r4, .L_strcpy_unalign_copy2bytes
cbz r4, strcpy_unalign_copy2bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
tst r2, #0xff0000
beq .L_strcpy_copy3bytes
beq strcpy_copy3bytes
lsrs ip, r2, #24
beq .L_strcpy_copy4bytes
beq strcpy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_unalign2
b strcpy_unalign2
.p2align 2
// Can read 1 byte before possibly crossing a page.
.L_strcpy_unalign1:
strcpy_unalign1:
ldrb r2, [r1]
cbz r2, .L_strcpy_unalign_copy1byte
cbz r2, strcpy_unalign_copy1byte
ldr r2, [r1], #4
ldr r3, [r1], #4
@@ -482,27 +482,27 @@ ENTRY(strcat)
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_first_register
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcpy_zero_in_second_register
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b .L_strcpy_unalign1
b strcpy_unalign1
.L_strcpy_unalign_copy1byte:
strcpy_unalign_copy1byte:
strb r2, [r0]
m_pop
.L_strcpy_unalign_copy2bytes:
strcpy_unalign_copy2bytes:
strb r2, [r0], #1
strb r4, [r0]
m_pop
.p2align 2
.L_strcat_mainloop:
strcat_mainloop:
ldrd r2, r3, [r0], #8
pld [r0, #64]
@@ -510,59 +510,59 @@ ENTRY(strcat)
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_strcat_zero_in_first_register
bne strcat_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_strcat_zero_in_second_register
b .L_strcat_mainloop
bne strcat_zero_in_second_register
b strcat_mainloop
.L_strcat_zero_in_first_register:
strcat_zero_in_first_register:
// Prefetch the src now; it's going to be used soon.
pld [r1, #0]
lsls lr, ip, #17
bne .L_strcat_sub8
bcs .L_strcat_sub7
bne strcat_sub8
bcs strcat_sub7
lsls ip, ip, #1
bne .L_strcat_sub6
bne strcat_sub6
sub r0, r0, #5
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub8:
strcat_sub8:
sub r0, r0, #8
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub7:
strcat_sub7:
sub r0, r0, #7
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub6:
strcat_sub6:
sub r0, r0, #6
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_zero_in_second_register:
strcat_zero_in_second_register:
// Prefetch the src now; it's going to be used soon.
pld [r1, #0]
lsls lr, ip, #17
bne .L_strcat_sub4
bcs .L_strcat_sub3
bne strcat_sub4
bcs strcat_sub3
lsls ip, ip, #1
bne .L_strcat_sub2
bne strcat_sub2
sub r0, r0, #1
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub4:
strcat_sub4:
sub r0, r0, #4
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub3:
strcat_sub3:
sub r0, r0, #3
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
.L_strcat_sub2:
strcat_sub2:
sub r0, r0, #2
b .L_strcat_r0_scan_done
b strcat_r0_scan_done
END(strcat)


@@ -65,38 +65,38 @@ ENTRY(strlen)
mov r1, r0
ands r3, r0, #7
beq .L_mainloop
beq mainloop
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .L_align_to_32
beq align_to_32
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return
cbz r2, update_count_and_return
.L_align_to_32:
bcc .L_align_to_64
align_to_32:
bcc align_to_64
ands ip, r3, #2
beq .L_align_to_64
beq align_to_64
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return
cbz r2, update_count_and_return
ldrb r2, [r1], #1
cbz r2, .L_update_count_and_return
cbz r2, update_count_and_return
.L_align_to_64:
align_to_64:
tst r3, #4
beq .L_mainloop
beq mainloop
ldr r3, [r1], #4
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
bne zero_in_second_register
.p2align 2
.L_mainloop:
mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
@@ -104,62 +104,62 @@ ENTRY(strlen)
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .L_zero_in_first_register
bne zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .L_zero_in_second_register
b .L_mainloop
bne zero_in_second_register
b mainloop
.L_update_count_and_return:
update_count_and_return:
sub r0, r1, r0
sub r0, r0, #1
bx lr
.L_zero_in_first_register:
zero_in_first_register:
sub r0, r1, r0
lsls r3, ip, #17
bne .L_sub8_and_return
bcs .L_sub7_and_return
bne sub8_and_return
bcs sub7_and_return
lsls ip, ip, #1
bne .L_sub6_and_return
bne sub6_and_return
sub r0, r0, #5
bx lr
.L_sub8_and_return:
sub8_and_return:
sub r0, r0, #8
bx lr
.L_sub7_and_return:
sub7_and_return:
sub r0, r0, #7
bx lr
.L_sub6_and_return:
sub6_and_return:
sub r0, r0, #6
bx lr
.L_zero_in_second_register:
zero_in_second_register:
sub r0, r1, r0
lsls r3, ip, #17
bne .L_sub4_and_return
bcs .L_sub3_and_return
bne sub4_and_return
bcs sub3_and_return
lsls ip, ip, #1
bne .L_sub2_and_return
bne sub2_and_return
sub r0, r0, #1
bx lr
.L_sub4_and_return:
sub4_and_return:
sub r0, r0, #4
bx lr
.L_sub3_and_return:
sub3_and_return:
sub r0, r0, #3
bx lr
.L_sub2_and_return:
sub2_and_return:
sub r0, r0, #2
bx lr
END(strlen)

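The strcpy/strcat and strlen loops above share one idiom: the sub/bic/ands triple with the constants 0x01010101 and 0x80808080 is a branch-free test for a zero byte anywhere in a 32-bit word, which is what lets these routines scan eight bytes per iteration. A minimal C sketch of the same test (illustrative only, not part of this change):

#include <assert.h>
#include <stdint.h>

/* Nonzero iff some byte of w is 0x00 -- the C equivalent of
   "sub ip, r2, #0x01010101; bic ip, ip, r2; ands ip, ip, #0x80808080". */
static uint32_t has_zero_byte(uint32_t w) {
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

int main(void) {
    assert(!has_zero_byte(0x61626364u));  /* "abcd": no terminator */
    assert(has_zero_byte(0x61006364u));   /* an embedded 0x00 is caught */
    return 0;
}
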
View File

@@ -1 +0,0 @@
include bionic/libc/arch-arm/cortex-a7/cortex-a7.mk

View File

@@ -133,7 +133,8 @@ ENTRY_PRIVATE(MEMCPY_BASE)
strbcs ip, [r0], #1
strbcs lr, [r0], #1
ldmfd sp!, {r0, pc}
ldmfd sp!, {r0, lr}
bx lr
END(MEMCPY_BASE)
ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)

View File

@@ -35,7 +35,6 @@
*/
.fpu neon
.syntax unified
ENTRY(__memset_chk)
cmp r2, r3
@@ -69,9 +68,12 @@ END(bzero)
ENTRY(memset)
// The neon memset only wins for less than 132.
cmp r2, #132
bhi .L_memset_large_copy
bhi __memset_large_copy
stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0
mov r3, r0
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
@@ -81,7 +83,7 @@ ENTRY(memset)
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r3]!
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
@@ -90,20 +92,22 @@ ENTRY(memset)
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r3]!
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r3]!
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r3]!
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strbmi r1, [r3], #1
strbcs r1, [r3], #1
strbcs r1, [r3], #1
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)
.L_memset_large_copy:
ENTRY_PRIVATE(__memset_large_copy)
/* compute the offset to align the destination
* offset = (4-(src&3))&3 = -src & 3
*/
@@ -127,11 +131,12 @@ ENTRY(memset)
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strbcs r1, [r0], #1 /* can't use strh (alignment unknown) */
strbcs r1, [r0], #1
strbmi r1, [r0], #1
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
popls {r0, r4-r7, pc} /* return */
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
@@ -150,9 +155,9 @@ ENTRY(memset)
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcs r0!, {r1, lr}
stmcs r0!, {r1, lr}
stmmi r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
@@ -167,15 +172,16 @@ ENTRY(memset)
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcs r0!, {r1,r3,r12,lr}
stmmi r0!, {r1, lr}
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strhmi r1, [r0], #2
strmih r1, [r0], #2
movs r2, r2, lsl #2
strbcs r1, [r0]
ldmfd sp!, {r0, r4-r7, pc}
END(memset)
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(__memset_large_copy)
.data
error_string:

View File

@@ -40,8 +40,6 @@
* Optimized memcmp() for Cortex-A9.
*/
.syntax unified
ENTRY(memcmp)
pld [r0, #(CACHE_LINE_SIZE * 0)]
pld [r0, #(CACHE_LINE_SIZE * 1)]
@@ -163,25 +161,25 @@ ENTRY(memcmp)
eors r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eorseq r0, r0, lr
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eorseq r0, r0, ip
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eorseq r0, r0, lr
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eorseq r0, r0, ip
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eorseq r0, r0, lr
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eorseq r0, r0, ip
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eorseq r0, r0, lr
eoreqs r0, r0, lr
bne 2f
subs r2, r2, #32
bhs 0b
@@ -221,7 +219,8 @@ ENTRY(memcmp)
bne 8b
9: /* restore registers and return */
ldmfd sp!, {r4, pc}
ldmfd sp!, {r4, lr}
bx lr
10: /* process less than 12 bytes */
cmp r2, #0
@@ -264,17 +263,17 @@ ENTRY(memcmp)
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eorseq r0, r0, ip
eoreqs r0, r0, ip
bne 7f
subs r2, r2, #16
bhs 6b
@@ -318,7 +317,7 @@ ENTRY(memcmp)
ldreq r7, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, r7, lsl r6
eorseq r0, r0, ip
eoreqs r0, r0, ip
bne 7f
subs r2, r2, #8
bhs 6b

View File

@@ -37,8 +37,6 @@
* so we have to preserve R0.
*/
.syntax unified
ENTRY(__memcpy_chk)
cmp r2, r3
bhi __memcpy_chk_fail
@@ -83,12 +81,12 @@ ENTRY(memcpy)
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrbmi r3, [r1], #1
ldrbcs r4, [r1], #1
ldrbcs r12,[r1], #1
strbmi r3, [r0], #1
strbcs r4, [r0], #1
strbcs r12,[r0], #1
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r12,[r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r12,[r0], #1
.Lsrc_aligned:
@@ -111,10 +109,10 @@ ENTRY(memcpy)
/* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9}
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
@@ -179,22 +177,23 @@ ENTRY(memcpy)
/* conditionally copies 0 to 31 bytes */
movs r12, r2, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9}
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrhmi r4, [r1], #2 /* 2 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strhmi r4, [r0], #2
strmih r4, [r0], #2
tst r2, #0x1
ldrbne r3, [r1] /* last byte */
strbne r3, [r0]
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, pc}
ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/
@@ -229,11 +228,11 @@ ENTRY(memcpy)
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/
movs r5, r5, lsl #31
strbmi r3, [r0], #1
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
cmp r2, #4
@@ -364,27 +363,28 @@ ENTRY(memcpy)
.Lpartial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
strbmi r3, [r0], #1
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
strcsb r3, [r0], #1
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
.Lcopy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrbmi r2, [r1], #1
ldrbcs r3, [r1], #1
ldrbcs r12,[r1]
strbmi r2, [r0], #1
strbcs r3, [r0], #1
strbcs r12,[r0]
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
ldrcsb r12,[r1]
strmib r2, [r0], #1
strcsb r3, [r0], #1
strcsb r12,[r0]
/* we're done! restore sp and spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, pc}
ldmfd sp!, {r0, r4, lr}
bx lr
END(memcpy)
// Only reached when the __memcpy_chk check fails.

View File

@@ -35,8 +35,6 @@
* memset() returns its first argument.
*/
.syntax unified
ENTRY(__memset_chk)
cmp r2, r3
bls done
@@ -78,11 +76,12 @@ ENTRY(memset)
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strbcs r1, [r0], #1 /* can't use strh (alignment unknown) */
strbcs r1, [r0], #1
strbmi r1, [r0], #1
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
popls {r0, r4-r7, pc} /* return */
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
@@ -101,9 +100,9 @@ ENTRY(memset)
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcs r0!, {r1, lr}
stmcs r0!, {r1, lr}
stmmi r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
@@ -118,14 +117,15 @@ ENTRY(memset)
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcs r0!, {r1,r3,r12,lr}
stmmi r0!, {r1, lr}
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strhmi r1, [r0], #2
strmih r1, [r0], #2
movs r2, r2, lsl #2
strbcs r1, [r0]
ldmfd sp!, {r0, r4-r7, pc}
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset)
.data

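The memset variants in this compare all need the fill byte replicated across a full register before the word and multi-word stores: the ARM paths build it with shift/orr steps such as "orr r1, r1, r1, lsr #16" above, and the NEON paths use "vdup.8 q0, r1". A one-line C sketch of the equivalent word fill:

#include <stdint.h>
#include <stdio.h>

/* Smear the low byte of c across a 32-bit word: 0xAB -> 0xABABABAB. */
static uint32_t smear_byte(int c) {
    return (uint32_t)(uint8_t)c * 0x01010101u;
}

int main(void) {
    printf("%08x\n", smear_byte(0xAB));  /* prints abababab */
    return 0;
}
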
View File

@@ -32,8 +32,6 @@
#include <machine/cpu-features.h>
#include <private/bionic_asm.h>
.syntax unified
ENTRY(strcpy)
pld [r1, #0]
eor r2, r0, r1
@@ -110,15 +108,15 @@ ENTRY(strcpy)
#ifdef __ARMEB__
tst r2, #0xff00
iteet ne
strhne r2, [ip], #2
strneh r2, [ip], #2
lsreq r2, r2, #8
strbeq r2, [ip]
streqb r2, [ip]
tstne r2, #0xff
#else
tst r2, #0xff
itet ne
strhne r2, [ip], #2
strbeq r2, [ip]
strneh r2, [ip], #2
streqb r2, [ip]
tstne r2, #0xff00
#endif
bne 5b

View File

@@ -118,5 +118,6 @@ ENTRY_PRIVATE(MEMCPY_BASE)
strbcs ip, [r0], #1
strbcs lr, [r0], #1
ldmfd sp!, {r0, pc}
ldmfd sp!, {r0, lr}
bx lr
END(MEMCPY_BASE)

View File

@@ -37,7 +37,6 @@
*/
.fpu neon
.syntax unified
ENTRY(__memset_chk)
cmp r2, r3
@@ -69,7 +68,10 @@ END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
mov r3, r0
stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
@@ -79,7 +81,7 @@ ENTRY(memset)
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r3]!
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
@@ -88,17 +90,18 @@ ENTRY(memset)
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r3]!
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r3]!
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r3]!
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strbmi r1, [r3], #1
strbcs r1, [r3], #1
strbcs r1, [r3], #1
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
mov ip, r7
ldr r7, =__NR_clock_nanosleep
swi #0
@@ -11,5 +11,4 @@ ENTRY(___clock_nanosleep)
bxls lr
neg r0, r0
b __set_errno_internal
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
mov ip, r7
ldr r7, =__NR_rt_sigqueueinfo
swi #0
@@ -11,5 +11,4 @@ ENTRY(___rt_sigqueueinfo)
bxls lr
neg r0, r0
b __set_errno_internal
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
mov ip, r7
ldr r7, =__NR_fgetxattr
swi #0
@@ -11,5 +11,4 @@ ENTRY(___fgetxattr)
bxls lr
neg r0, r0
b __set_errno_internal
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
mov ip, r7
ldr r7, =__NR_flistxattr
swi #0
@@ -11,5 +11,4 @@ ENTRY(___flistxattr)
bxls lr
neg r0, r0
b __set_errno_internal
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
@@ -19,5 +19,4 @@ ENTRY(___fsetxattr)
bxls lr
neg r0, r0
b __set_errno_internal
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)

View File

@@ -0,0 +1,14 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(ftruncate)
mov ip, r7
ldr r7, =__NR_ftruncate
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(ftruncate)

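The ARM stubs above all share the same error convention: the kernel returns -errno in the range [-MAX_ERRNO, -1], "cmn r0, #(MAX_ERRNO + 1); bxls lr" returns any other value unchanged, and the error path negates r0 and tail-calls __set_errno_internal. A hedged C sketch of that logic (MAX_ERRNO = 4095 is assumed from the Linux ABI):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Mirror of the stub epilogue: kernel error codes become errno plus -1. */
static long wrap_syscall_result(long raw) {
    if ((unsigned long)raw >= (unsigned long)-MAX_ERRNO) {
        errno = (int)-raw;   /* what the branch to __set_errno_internal does */
        return -1;
    }
    return raw;              /* success: pass the value straight through */
}

int main(void) {
    printf("%ld errno=%d\n", wrap_syscall_result(-2), errno);  /* -1, ENOENT */
    return 0;
}
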
View File

@@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_process_vm_readv
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(process_vm_readv)

View File

@@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
mov ip, sp
stmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
ldmfd ip, {r4, r5, r6}
ldr r7, =__NR_process_vm_writev
swi #0
ldmfd sp!, {r4, r5, r6, r7}
.cfi_def_cfa_offset 0
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(process_vm_writev)

View File

@@ -0,0 +1,14 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(vfork)
mov ip, r7
ldr r7, =__NR_vfork
swi #0
mov r7, ip
cmn r0, #(MAX_ERRNO + 1)
bxls lr
neg r0, r0
b __set_errno_internal
END(vfork)

View File

@@ -40,6 +40,8 @@ libc_bionic_src_files_arm64 += \
arch-arm64/bionic/syscall.S \
arch-arm64/bionic/vfork.S \
# Workaround for http://b/20065774.
libc_bionic_src_files_arm64 += arch-arm64/bionic/libgcc_compat.c
libc_crt_target_cflags_arm64 := \
-I$(LOCAL_PATH)/arch-arm64/include

View File

@@ -0,0 +1,15 @@
/* STOPSHIP: remove this once the flounder blobs have been rebuilt (http://b/20065774). */
#if !defined(__clang__)
extern void __clear_cache(char*, char*);
extern char _Unwind_Backtrace;
extern char _Unwind_GetIP;
void* __bionic_libgcc_compat_symbols[] = {
&__clear_cache,
&_Unwind_Backtrace,
&_Unwind_GetIP,
};
#endif

View File

@@ -31,11 +31,6 @@
#include <linux/sched.h>
ENTRY(vfork)
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
mrs x0, tpidr_el0
ldr x0, [x0, #8]
str wzr, [x0, #20]
mov x0, #(CLONE_VM | CLONE_VFORK | SIGCHLD)
mov x1, xzr
mov x2, xzr

View File

@@ -1 +0,0 @@
include bionic/libc/arch-arm64/generic/generic.mk

View File

@@ -101,7 +101,7 @@ ENTRY(memchr)
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.d[0]
mov synd, vend.2d[0]
/* Clear the soff*2 lower bits */
lsl tmp, soff, #1
lsr synd, synd, tmp
@@ -121,7 +121,7 @@ ENTRY(memchr)
/* Use a fast check for the termination condition */
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d
mov synd, vend.d[0]
mov synd, vend.2d[0]
/* We're not out of data, loop if we haven't found the character */
cbz synd, .Lloop
@@ -131,7 +131,7 @@ ENTRY(memchr)
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.d[0]
mov synd, vend.2d[0]
/* Only do the clear for the last possible block */
b.hi .Ltail

View File

@@ -109,7 +109,7 @@ ENTRY(strchr)
addp vend1.16b, vend1.16b, vend2.16b // 128->64
lsr tmp1, tmp3, tmp1
mov tmp3, vend1.d[0]
mov tmp3, vend1.2d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail
@@ -124,7 +124,7 @@ ENTRY(strchr)
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vend1.16b, vend2.16b
addp vend1.2d, vend1.2d, vend1.2d
mov tmp1, vend1.d[0]
mov tmp1, vend1.2d[0]
cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why
@@ -138,7 +138,7 @@ ENTRY(strchr)
addp vend1.16b, vend1.16b, vend2.16b // 256->128
addp vend1.16b, vend1.16b, vend2.16b // 128->64
mov tmp1, vend1.d[0]
mov tmp1, vend1.2d[0]
.Ltail:
/* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1

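The tail sequence above ("rbit" followed by a leading-zero count) is how the arm64 routines turn the NEON syndrome into the index of the first matching byte: reversing the bits converts a count of trailing zeros into a count of leading zeros. A small illustrative C equivalent (not from the diff):

#include <assert.h>
#include <stdint.h>

/* Count trailing zeros, the job "rbit; clz" performs as a pair. */
static unsigned ctz64(uint64_t x) {
    unsigned n = 0;
    while ((x & 1) == 0) { x >>= 1; n++; }  /* caller guarantees x != 0 */
    return n;
}

int main(void) {
    assert(ctz64(0x100) == 8);  /* lowest set bit is bit 8 */
    return 0;
}
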
View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
mov x8, __NR_clock_nanosleep
svc #0
@@ -11,5 +11,5 @@ ENTRY(___clock_nanosleep)
b.hi __set_errno_internal
ret
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)
.hidden __clock_nanosleep

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
mov x8, __NR_rt_sigqueueinfo
svc #0
@@ -11,5 +11,5 @@ ENTRY(___rt_sigqueueinfo)
b.hi __set_errno_internal
ret
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)
.hidden __rt_sigqueueinfo

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
mov x8, __NR_fgetxattr
svc #0
@@ -11,5 +11,4 @@ ENTRY(___fgetxattr)
b.hi __set_errno_internal
ret
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
mov x8, __NR_flistxattr
svc #0
@@ -11,5 +11,4 @@ ENTRY(___flistxattr)
b.hi __set_errno_internal
ret
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
mov x8, __NR_fsetxattr
svc #0
@@ -11,5 +11,4 @@ ENTRY(___fsetxattr)
b.hi __set_errno_internal
ret
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)

View File

@@ -1,14 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
mov x8, __NR_process_vm_readv
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(process_vm_readv)

View File

@@ -1,14 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
mov x8, __NR_process_vm_writev
svc #0
cmn x0, #(MAX_ERRNO + 1)
cneg x0, x0, hi
b.hi __set_errno_internal
ret
END(process_vm_writev)

View File

@@ -28,9 +28,9 @@
#ifdef __LP64__
# define ASM_PTR_SIZE(x) .quad x
# define ASM_ALIGN_TO_PTR_SIZE .balign 8
# define ASM_ALIGN(x)
#else
# define ASM_PTR_SIZE(x) .long x
# define ASM_ALIGN_TO_PTR_SIZE .balign 4
# define ASM_ALIGN(x) .align x
#endif

View File

@@ -29,15 +29,12 @@
#include "asm_multiarch.h"
.section .preinit_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0)
.section .init_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0)
.section .fini_array, "aw"
ASM_ALIGN_TO_PTR_SIZE
ASM_PTR_SIZE(0)
#if defined(__linux__) && defined(__ELF__)
@@ -45,9 +42,7 @@
#endif
#if defined(__i386__) || defined(__x86_64__)
.section .eh_frame,"a",@progbits
#if defined(__i386__)
.balign 4
#endif
ASM_ALIGN(4)
.type __FRAME_END__, @object
.size __FRAME_END__, 4
__FRAME_END__:

View File

@@ -26,14 +26,22 @@
* SUCH DAMAGE.
*/
#include "asm_multiarch.h"
#ifndef __arm__
.section .init_array, "aw"
ASM_PTR_SIZE(0)
.section .fini_array, "aw"
ASM_PTR_SIZE(0)
#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__i386__) || defined(__x86_64__)
.section .eh_frame,"a",@progbits
#if defined(__i386__)
.balign 4
#endif
ASM_ALIGN(4)
.type __FRAME_END__, @object
.size __FRAME_END__, 4
__FRAME_END__:

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (C) 2014-2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@@ -37,14 +37,6 @@ ENTRY(vfork)
.set noreorder
.cpload t9
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
.set push
.set mips32r2
rdhwr v0, $29 // v0 = tls; kernel trap on mips32r1
.set pop
lw v0, REGSZ*1(v0) // v0 = v0[TLS_SLOT_THREAD_ID ie 1]
sw $0, REGSZ*2+4(v0) // v0->cached_pid_ = 0
li a0, (CLONE_VM | CLONE_VFORK | SIGCHLD)
li a1, 0
li a2, 0

View File

@@ -6,9 +6,6 @@
libc_bionic_src_files_mips += \
arch-mips/string/memcmp.c \
arch-mips/string/memcpy.S \
arch-mips/string/memset.S \
arch-mips/string/strcmp.S \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
@@ -34,6 +31,7 @@ libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strcmp.c \
upstream-openbsd/lib/libc/string/strcpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
@@ -56,10 +54,14 @@ libc_bionic_src_files_mips += \
ifndef ARCH_MIPS_REV6
libc_bionic_src_files_mips += \
arch-mips/string/memcpy.S \
arch-mips/string/memset.S \
arch-mips/string/mips_strlen.c \
else
libc_bionic_src_files_mips += \
arch-mips/string/memcpy.c \
arch-mips/string/memset.c \
arch-mips/string/strlen.c \
endif

File diff suppressed because it is too large.

View File

@@ -0,0 +1,91 @@
/* $OpenBSD: memcpy.c,v 1.1 2014/11/30 19:43:56 deraadt Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Chris Torek.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <string.h>
#include <stdlib.h>
#include <syslog.h>
/*
* sizeof(word) MUST BE A POWER OF TWO
* SO THAT wmask BELOW IS ALL ONES
*/
typedef long word; /* "word" used for optimal copy speed */
#define wsize sizeof(word)
#define wmask (wsize - 1)
/*
* Copy a block of memory, not handling overlap.
*/
void *
memcpy(void *dst0, const void *src0, size_t length)
{
char *dst = dst0;
const char *src = src0;
size_t t;
if (length == 0 || dst == src) /* nothing to do */
goto done;
/*
* Macros: loop-t-times; and loop-t-times, t>0
*/
#define TLOOP(s) if (t) TLOOP1(s)
#define TLOOP1(s) do { s; } while (--t)
/*
* Copy forward.
*/
t = (long)src; /* only need low bits */
if ((t | (long)dst) & wmask) {
/*
* Try to align operands. This cannot be done
* unless the low bits match.
*/
if ((t ^ (long)dst) & wmask || length < wsize)
t = length;
else
t = wsize - (t & wmask);
length -= t;
TLOOP1(*dst++ = *src++);
}
/*
* Copy whole words, then mop up any trailing bytes.
*/
t = length / wsize;
TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
t = length & wmask;
TLOOP(*dst++ = *src++);
done:
return (dst0);
}

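The alignment logic in the copy-forward path above is easiest to check with concrete numbers: the head-byte count wsize - (t & wmask) is only meaningful when src and dst share the same low bits, which is exactly what (t ^ (long)dst) & wmask tests. A small worked example with made-up addresses:

#include <stdio.h>
#include <stddef.h>

#define wsize sizeof(long)
#define wmask (wsize - 1)

int main(void) {
    unsigned long src = 0x1003, dst = 0x2003;  /* same low bits */
    if (((src ^ dst) & wmask) == 0)
        printf("copy %zu head byte(s), then whole words\n",
               (size_t)(wsize - (src & wmask)));   /* 5 on a 64-bit word */
    unsigned long src2 = 0x1001;                   /* low bits differ from dst */
    if ((src2 ^ dst) & wmask)
        puts("cannot co-align: fall back to the byte-at-a-time loop");
    return 0;
}
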
View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013
* Copyright (c) 2009
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
@@ -27,410 +27,216 @@
* SUCH DAMAGE.
*/
#ifdef __ANDROID__
# include <private/bionic_asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
/************************************************************************
*
* memset.S, version "64h" with 1 cache line horizon for "pref 30" and 14 nops
* Version: "043009"
*
************************************************************************/
/************************************************************************
* Include files
************************************************************************/
#include <private/bionic_asm.h>
/*
* This routine could be optimized for MIPS64. The current code only
* uses MIPS32 instructions.
*/
#if defined(__MIPSEB__)
# define SWHI swl /* high part is left in big-endian */
# define SWLO swr /* low part is right in big-endian */
#endif
#if defined(__MIPSEL__)
# define SWHI swr /* high part is right in little-endian */
# define SWLO swl /* low part is left in little-endian */
#endif
#if !(defined(XGPROF) || defined(XPROF))
#undef SETUP_GP
#define SETUP_GP
#endif
#ifdef NDEBUG
#define DBG #
#else
# include <regdef.h>
# include <sys/asm.h>
#define DBG
#endif
/* Check to see if the MIPS architecture we are compiling for supports
prefetching. */
LEAF(memset,0)
#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
# define USE_PREFETCH
# endif
#endif
#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
# define USE_DOUBLE
# endif
#endif
#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
# define DOUBLE_ALIGN
# endif
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
# define PTR_ADDIU addiu
# else
# define PTR_ADDIU daddiu
# endif
#endif
/* New R6 instructions that may not be in asm.h. */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABIO32
# define PTR_LSA lsa
# else
# define PTR_LSA dlsa
# endif
#endif
/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
or PREFETCH_STORE_STREAMED offers a large performance advantage
but PREPAREFORSTORE has some special restrictions to consider.
Prefetch with the 'prepare for store' hint does not copy a memory
location into the cache, it just allocates a cache line and zeros
it out. This means that if you do not write to the entire cache
line before writing it out to memory, some data will get zeroed out
when the cache line is written back to memory and data will be lost.
There are ifdef'ed sections of this memset to make sure that it does not
do prefetches on cache lines that are not going to be completely written.
This code is only needed and only used when PREFETCH_STORE_HINT is set to
PREFETCH_HINT_PREPAREFORSTORE. This code assumes that cache lines are
less than MAX_PREFETCH_SIZE bytes and if the cache line is larger it will
not work correctly. */
#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_PREPAREFORSTORE 30
/* If we have not picked out what hints to use at this point use the
standard load and store prefetch hints. */
# ifndef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif
/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
get 64 bytes in that case. The assumption is that each individual
prefetch brings in 32 bytes. */
# ifdef USE_DOUBLE
# define PREFETCH_CHUNK 64
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
# define PREFETCH_CHUNK 32
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
than PREFETCH_CHUNK, the assumed size of each prefetch. If the real size
of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
hint is used, the code will not work correctly. If PREPAREFORSTORE is not
used then MAX_PREFETCH_SIZE does not matter. */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
than 5 on a STORE prefetch and that a single prefetch can never be larger
than MAX_PREFETCH_SIZE. We add the extra 32 when USE_DOUBLE is set because
we actually do two prefetches in that case, one 32 bytes after the other. */
# ifdef USE_DOUBLE
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
&& ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
are before the buffer being copied. We start copies with an offset
of 4 to avoid this situation when using PREPAREFORSTORE. */
# error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif
#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
# undef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif
/* Allow the routine to be named something else if desired. */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
#endif
/* We load/store 64 bits at a time when USE_DOUBLE is true.
The C_ prefix stands for CHUNK and is used to avoid macro name
conflicts with system header files. */
#ifdef USE_DOUBLE
# define C_ST sd
# if __MIPSEB
# define C_STHI sdl /* high part is left in big-endian */
# else
# define C_STHI sdr /* high part is right in little-endian */
# endif
#else
# define C_ST sw
# if __MIPSEB
# define C_STHI swl /* high part is left in big-endian */
# else
# define C_STHI swr /* high part is right in little-endian */
# endif
#endif
/* Bookkeeping values for 32 vs. 64 bit mode. */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
#ifdef __ANDROID__
LEAF(MEMSET_NAME,0)
#else
LEAF(MEMSET_NAME)
#endif
.set nomips16
.set noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb). Regardless of
size, copy dst pointer to v0 for the return value. */
slti t2,a2,(2 * NSIZE)
bne t2,zero,L(lastb)
move v0,a0
.set noat
/* If memset value is not zero, we copy it to all the bytes in a 32 or 64
bit word. */
beq a1,zero,L(set0) /* If memset value is zero no smear */
PTR_SUBU a3,zero,a0
nop
addu t0,a0,a2 # t0 is the "past the end" address
slti AT,a2,4 # is a2 less than 4?
bne AT,zero,.Llast4 # if yes, go to last4
move v0,a0 # memset returns the dst pointer
/* smear byte into 32 or 64 bit word */
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
# ifdef USE_DOUBLE
dins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
dins a1, a1, 16, 16 /* Replicate fill byte into word. */
dins a1, a1, 32, 32 /* Replicate fill byte into dbl word. */
# else
ins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
ins a1, a1, 16, 16 /* Replicate fill byte into word. */
# endif
beq a1,zero,.Lset0
subu v1,zero,a0
# smear byte into 32 bit word
#if (__mips==32) && (__mips_isa_rev>=2)
ins a1, a1, 8, 8 # Replicate fill byte into half-word.
ins a1, a1, 16, 16 # Replicate fill byte into word.
#else
# ifdef USE_DOUBLE
and a1,0xff
dsll t2,a1,8
or a1,t2
dsll t2,a1,16
or a1,t2
dsll t2,a1,32
or a1,t2
# else
and a1,0xff
sll t2,a1,8
or a1,t2
sll t2,a1,16
or a1,t2
# endif
and a1,0xff
sll AT,a1,8
or a1,AT
sll AT,a1,16
or a1,AT
#endif
/* If the destination address is not aligned do a partial store to get it
aligned. If it is already aligned just jump to L(aligned). */
L(set0):
#ifndef R6_CODE
andi t2,a3,(NSIZE-1) /* word-unaligned address? */
beq t2,zero,L(aligned) /* t2 is the unalignment count */
PTR_SUBU a2,a2,t2
C_STHI a1,0(a0)
PTR_ADDU a0,a0,t2
#else /* R6_CODE */
andi t2,a0,(NSIZE-1)
lapc t9,L(atable)
PTR_LSA t9,t2,t9,2
jrc t9
L(atable):
bc L(aligned)
# ifdef USE_DOUBLE
bc L(lb7)
bc L(lb6)
bc L(lb5)
bc L(lb4)
# endif
bc L(lb3)
bc L(lb2)
bc L(lb1)
L(lb7):
sb a1,6(a0)
L(lb6):
sb a1,5(a0)
L(lb5):
sb a1,4(a0)
L(lb4):
sb a1,3(a0)
L(lb3):
sb a1,2(a0)
L(lb2):
sb a1,1(a0)
L(lb1):
sb a1,0(a0)
.Lset0:
andi v1,v1,0x3 # word-unaligned address?
beq v1,zero,.Laligned # v1 is the unalignment count
subu a2,a2,v1
SWHI a1,0(a0)
addu a0,a0,v1
li t9,NSIZE
subu t2,t9,t2
PTR_SUBU a2,a2,t2
PTR_ADDU a0,a0,t2
#endif /* R6_CODE */
# Here we have the "word-aligned" a0 (until the "last4")
.Laligned:
andi t8,a2,0x3f # any 64-byte chunks?
# t8 is the byte count past 64-byte chunks
beq a2,t8,.Lchk8w # when a2==t8, no 64-byte chunks
# There will be at most 1 32-byte chunk then
subu a3,a2,t8 # subtract from a2 the remainder
# Here a3 counts bytes in 16w chunks
addu a3,a0,a3 # Now a3 is the final dst after 64-byte chunks
# Find out if there are any 64-byte chunks after which at least
# 96 bytes will still be left. The value "96" is the buffer needed for the
# "pref 30,64(a0)" prefetch, which can be used as "pref 30,0(a0)" after
# incrementing "a0" by 64.
# For "a2" below 160 there will be no such "pref 30 safe" 64-byte chunk.
#
sltiu v1,a2,160
bgtz v1,.Lloop16w_nopref30 # skip "pref 30,0(a0)"
subu t7,a2,96 # subtract "pref 30 unsafe" region
# below we have at least 1 64-byte chunk which is "pref 30 safe"
andi t6,t7,0x3f # t6 is the remainder past the "64-byte safe" chunks
subu t5,t7,t6 # subtract from t7 the remainder
# Here t5 counts bytes in 16w "safe" chunks
addu t4,a0,t5 # Now t4 is the dst after 64-byte "safe" chunks
# Don't use "pref 30,0(a0)" for a0 in a "middle" of a cache line
# pref 30,0(a0)
# Here we are in the region, where it is safe to use "pref 30,64(a0)"
.Lloop16w:
addiu a0,a0,64
pref 30,-32(a0) # continue setting up the dest, addr 64-32
sw a1,-64(a0)
sw a1,-60(a0)
sw a1,-56(a0)
sw a1,-52(a0)
sw a1,-48(a0)
sw a1,-44(a0)
sw a1,-40(a0)
sw a1,-36(a0)
nop
nop # the extra nop instructions help to balance
nop # cycles needed for "store" + "fill" + "evict"
nop # For 64byte store there are needed 8 fill
nop # and 8 evict cycles, i.e. at least 32 instr.
nop
nop
pref 30,0(a0) # continue setting up the dest, addr 64-0
sw a1,-32(a0)
sw a1,-28(a0)
sw a1,-24(a0)
sw a1,-20(a0)
sw a1,-16(a0)
sw a1,-12(a0)
sw a1,-8(a0)
sw a1,-4(a0)
nop
nop
nop
nop # NOTE: adding 14 nop-s instead of 12 nop-s
nop # gives better results for "fast" memory
nop
bne a0,t4,.Lloop16w
nop
beq a0,a3,.Lchk8w # maybe no more 64-byte chunks?
nop # this "delayed slot" is useless ...
.Lloop16w_nopref30: # there could be up to 3 "64-byte nopref30" chunks
addiu a0,a0,64
sw a1,-64(a0)
sw a1,-60(a0)
sw a1,-56(a0)
sw a1,-52(a0)
sw a1,-48(a0)
sw a1,-44(a0)
sw a1,-40(a0)
sw a1,-36(a0)
sw a1,-32(a0)
sw a1,-28(a0)
sw a1,-24(a0)
sw a1,-20(a0)
sw a1,-16(a0)
sw a1,-12(a0)
sw a1,-8(a0)
bne a0,a3,.Lloop16w_nopref30
sw a1,-4(a0)
.Lchk8w: # t8 here is the byte count past 64-byte chunks
andi t7,t8,0x1f # is there a 32-byte chunk?
# the t7 is the remainder count past 32-bytes
beq t8,t7,.Lchk1w # when t8==t7, no 32-byte chunk
move a2,t7
L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on a 16
byte boundary instead of an 8 byte boundary to maximize the opportunity
of proAptiv chips to do memory bonding (combining two sequential 4
byte stores into one 8 byte store). We know there are at least 4 bytes
left to store or we would have jumped to L(lastb) earlier in the code. */
#ifdef DOUBLE_ALIGN
andi t2,a3,4
beq t2,zero,L(double_aligned)
PTR_SUBU a2,a2,t2
sw a1,0(a0)
PTR_ADDU a0,a0,t2
L(double_aligned):
#endif
sw a1,4(a0)
sw a1,8(a0)
sw a1,12(a0)
sw a1,16(a0)
sw a1,20(a0)
sw a1,24(a0)
sw a1,28(a0)
addiu a0,a0,32
/* Now the destination is at a (word or double word) aligned address.
Set a2 to count how many bytes we have to copy after all the 64/128 byte
chunks are copied and a3 to the dest pointer after all the 64/128 byte
chunks have been copied. We will loop, incrementing a0 until it equals
a3. */
andi t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
beq a2,t8,L(chkw) /* if a2==t8, no 64-byte/128-byte chunks */
PTR_SUBU a3,a2,t8 /* subtract from a2 the remainder */
PTR_ADDU a3,a0,a3 /* Now a3 is the final dst after loop */
.Lchk1w:
andi t8,a2,0x3 # now t8 is the remainder past 1w chunks
beq a2,t8,.Llast4aligned
subu a3,a2,t8 # a3 is the count of bytes in 1w chunks
addu a3,a0,a3 # now a3 is the dst address past the 1w chunks
/* When in the loop we may prefetch with the 'prepare to store' hint,
in which case a0+x must not be past the "t0-32" address. This
means: for x=128 the last "safe" a0 address is "t0-160". Alternatively,
for x=64 the last "safe" a0 address is "t0-96". In the current version we
will use "prefetch hint,128(a0)", so "t0-160" is the limit. */
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
PTR_ADDU t0,a0,a2 /* t0 is the "past the end" address */
PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
#endif
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
PREFETCH_FOR_STORE (1, a0)
PREFETCH_FOR_STORE (2, a0)
PREFETCH_FOR_STORE (3, a0)
#endif
# copying in words (4-byte chunks)
.LwordCopy_loop:
addiu a0,a0,4
bne a0,a3,.LwordCopy_loop
sw a1,-4(a0)
L(loop16w):
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
sltu v1,t9,a0 /* If a0 > t9 don't use next prefetch */
bgtz v1,L(skip_pref)
nop
#endif
#ifndef R6_CODE
PREFETCH_FOR_STORE (4, a0)
PREFETCH_FOR_STORE (5, a0)
#else
PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
C_ST a1,UNIT(8)(a0)
C_ST a1,UNIT(9)(a0)
C_ST a1,UNIT(10)(a0)
C_ST a1,UNIT(11)(a0)
C_ST a1,UNIT(12)(a0)
C_ST a1,UNIT(13)(a0)
C_ST a1,UNIT(14)(a0)
C_ST a1,UNIT(15)(a0)
PTR_ADDIU a0,a0,UNIT(16) /* adding 64/128 to dest */
bne a0,a3,L(loop16w)
nop
move a2,t8
/* Here we have dest word-aligned but less than 64-bytes or 128 bytes to go.
Check for a 32(64) byte chunk and copy it if there is one. Otherwise
jump down to L(chk1w) to handle the tail end of the copy. */
L(chkw):
andi t8,a2,NSIZEMASK /* is there a 32-byte/64-byte chunk. */
/* the t8 is the remainder count past 32-bytes */
beq a2,t8,L(chk1w)/* when a2==t8, no 32-byte chunk */
nop
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
PTR_ADDIU a0,a0,UNIT(8)
/* Here we have less than 32(64) bytes to set. Set up for a loop to
copy one word (or double word) at a time. Set a2 to count how many
bytes we have to copy after all the word (or double word) chunks are
copied and a3 to the dest pointer after all the (d)word chunks have
been copied. We will loop, incrementing a0 until a0 equals a3. */
L(chk1w):
andi a2,t8,(NSIZE-1) /* a2 is the remainder past one (d)word chunks */
beq a2,t8,L(lastb)
PTR_SUBU a3,t8,a2 /* a3 is count of bytes in one (d)word chunks */
PTR_ADDU a3,a0,a3 /* a3 is the dst address after loop */
/* copying in words (4-byte or 8 byte chunks) */
L(wordCopy_loop):
PTR_ADDIU a0,a0,UNIT(1)
bne a0,a3,L(wordCopy_loop)
C_ST a1,UNIT(-1)(a0)
/* Copy the last 8 (or 16) bytes */
L(lastb):
blez a2,L(leave)
PTR_ADDU a3,a0,a2 /* a3 is the last dst address */
L(lastbloop):
PTR_ADDIU a0,a0,1
bne a0,a3,L(lastbloop)
sb a1,-1(a0)
L(leave):
# store last 0-3 bytes
# this will repeat the last store if the memset finishes on a word boundary
.Llast4aligned:
j ra
nop
SWLO a1,-1(t0)
.Llast4:
beq a0,t0,.Llast4e
.Llast4l:
addiu a0,a0,1
bne a0,t0,.Llast4l
sb a1,-1(a0)
.Llast4e:
j ra
nop
.set at
.set reorder
END(MEMSET_NAME)
#ifndef __ANDROID__
# ifdef _LIBC
libc_hidden_builtin_def (MEMSET_NAME)
# endif
#endif
END(memset)
/************************************************************************
* Implementation : Static functions
************************************************************************/

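The long comment in the newer memset variant above about PREFETCH_HINT_PREPAREFORSTORE reduces to one pointer bound: since "pref 30" allocates and zero-fills an entire cache line without reading memory, it must never be issued for a line the loop will not completely overwrite. A hedged C sketch of that bound, with the prefetch stubbed out (it is a MIPS instruction) and the constants mirroring the macros above:

#include <stddef.h>
#include <string.h>

#define PREFETCH_CHUNK     32
#define MAX_PREFETCH_SIZE  128
#define PREFETCH_LIMIT     (5 * PREFETCH_CHUNK + MAX_PREFETCH_SIZE)

/* Stand-in for "pref 30, off(reg)": prepare-for-store zero-fills a line. */
static void prefetch_for_store(unsigned char *p) { (void)p; }

static void fill64(unsigned char *dst, size_t n, unsigned char c) {
    unsigned char *end = dst + n;
    /* Last address from which a store prefetch cannot reach past the end. */
    unsigned char *last_safe = (n > PREFETCH_LIMIT) ? end - PREFETCH_LIMIT : dst;
    while ((size_t)(end - dst) >= 64) {
        if (dst < last_safe)
            prefetch_for_store(dst + 64);  /* only well before the buffer end */
        memset(dst, c, 64);                /* stands in for the unrolled stores */
        dst += 64;
    }
    memset(dst, c, (size_t)(end - dst));
}

int main(void) {
    unsigned char buf[1024];
    fill64(buf, sizeof(buf), 0xA5);
    return (buf[0] == 0xA5 && buf[1023] == 0xA5) ? 0 : 1;
}
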
View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -25,16 +25,20 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <string.h>
#include <stdint.h>
#undef _FORTIFY_SOURCE
void* memset(void* dst, int c, size_t n)
{
char* q = dst;
char* end = q + n;
#include <unistd.h>
#include "private/libc_logging.h"
for (;;) {
if (q >= end) break; *q++ = (char) c;
if (q >= end) break; *q++ = (char) c;
if (q >= end) break; *q++ = (char) c;
if (q >= end) break; *q++ = (char) c;
}
extern char* __getcwd_chk(char* buf, size_t len, size_t buflen) {
if (__predict_false(len > buflen)) {
__fortify_chk_fail("getcwd: prevented write past end of buffer", 0);
}
return getcwd(buf, len);
return dst;
}

View File

@@ -1,260 +0,0 @@
/*
* Copyright (c) 2014
* Imagination Technologies Limited.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY IMAGINATION TECHNOLOGIES LIMITED ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IMAGINATION TECHNOLOGIES LIMITED BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef __ANDROID__
# include <private/bionic_asm.h>
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
#else
# include <regdef.h>
# include <sys/asm.h>
#endif
/* Technically strcmp should not read past the end of the strings being
compared. We will read a full word that may contain excess bits beyond
the NULL string terminator but unless ENABLE_READAHEAD is set, we will not
read the next word after the end of string. Setting ENABLE_READAHEAD will
improve performance but is technically illegal based on the definition of
strcmp. */
#ifdef ENABLE_READAHEAD
# define DELAY_READ
#else
# define DELAY_READ nop
#endif
/* Testing on a little endian machine showed using CLZ was a
performance loss, so we are not turning it on by default. */
#if defined(ENABLE_CLZ) && (__mips_isa_rev > 1)
# define USE_CLZ
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
# define PTR_ADDIU addiu
# else
# define PTR_ADDIU daddiu
# endif
#endif
/* Allow the routine to be named something else if desired. */
#ifndef STRCMP_NAME
# define STRCMP_NAME strcmp
#endif
#ifdef __ANDROID__
LEAF(STRCMP_NAME, 0)
#else
LEAF(STRCMP_NAME)
#endif
.set nomips16
.set noreorder
or t0, a0, a1
andi t0,0x3
bne t0, zero, L(byteloop)
/* Both strings are 4 byte aligned at this point. */
lui t8, 0x0101
ori t8, t8, 0x0101
lui t9, 0x7f7f
ori t9, 0x7f7f
#define STRCMP32(OFFSET) \
lw v0, OFFSET(a0); \
lw v1, OFFSET(a1); \
subu t0, v0, t8; \
bne v0, v1, L(worddiff); \
nor t1, v0, t9; \
and t0, t0, t1; \
bne t0, zero, L(returnzero)
L(wordloop):
STRCMP32(0)
DELAY_READ
STRCMP32(4)
DELAY_READ
STRCMP32(8)
DELAY_READ
STRCMP32(12)
DELAY_READ
STRCMP32(16)
DELAY_READ
STRCMP32(20)
DELAY_READ
STRCMP32(24)
DELAY_READ
STRCMP32(28)
PTR_ADDIU a0, a0, 32
b L(wordloop)
PTR_ADDIU a1, a1, 32
L(returnzero):
j ra
move v0, zero
L(worddiff):
#ifdef USE_CLZ
subu t0, v0, t8
nor t1, v0, t9
and t1, t0, t1
xor t0, v0, v1
or t0, t0, t1
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
wsbh t0, t0
rotr t0, t0, 16
# endif
clz t1, t0
and t1, 0xf8
# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
neg t1
addu t1, 24
# endif
rotrv v0, v0, t1
rotrv v1, v1, t1
and v0, v0, 0xff
and v1, v1, 0xff
j ra
subu v0, v0, v1
#else /* USE_CLZ */
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
andi t0, v0, 0xff
beq t0, zero, L(wexit01)
andi t1, v1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 8
srl t9, v1, 8
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 16
srl t1, v1, 16
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 24
srl t9, v1, 24
# else /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
srl t0, v0, 24
beq t0, zero, L(wexit01)
srl t1, v1, 24
bne t0, t1, L(wexit01)
srl t8, v0, 16
srl t9, v1, 16
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 8
srl t1, v1, 8
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
andi t8, v0, 0xff
andi t9, v1, 0xff
# endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
L(wexit89):
j ra
subu v0, t8, t9
L(wexit01):
j ra
subu v0, t0, t1
#endif /* USE_CLZ */
/* It might seem better to do the 'beq' instruction between the two 'lbu'
instructions so that the nop is not needed but testing showed that this
code is actually faster (based on glibc strcmp test). */
#define BYTECMP01(OFFSET) \
lbu v0, OFFSET(a0); \
lbu v1, OFFSET(a1); \
beq v0, zero, L(bexit01); \
nop; \
bne v0, v1, L(bexit01)
#define BYTECMP89(OFFSET) \
lbu t8, OFFSET(a0); \
lbu t9, OFFSET(a1); \
beq t8, zero, L(bexit89); \
nop; \
bne t8, t9, L(bexit89)
L(byteloop):
BYTECMP01(0)
BYTECMP89(1)
BYTECMP01(2)
BYTECMP89(3)
BYTECMP01(4)
BYTECMP89(5)
BYTECMP01(6)
BYTECMP89(7)
PTR_ADDIU a0, a0, 8
b L(byteloop)
PTR_ADDIU a1, a1, 8
L(bexit01):
j ra
subu v0, v0, v1
L(bexit89):
j ra
subu v0, t8, t9
.set at
.set reorder
END(STRCMP_NAME)
#ifndef __ANDROID__
# ifdef _LIBC
libc_hidden_builtin_def (STRCMP_NAME)
# endif
#endif

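The STRCMP32 macro in the deleted strcmp.S above is the word-at-a-time core: load a word from each string, drop to a byte comparison once the words differ, and stop when the zero-byte mask (t8 = 0x01010101, t9 = 0x7f7f7f7f) fires. A rough C equivalent, assuming in-bounds 4-byte loads as the assembly's alignment handling guarantees:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Nonzero iff some byte of w is zero -- the subu/nor/and mask trick. */
static uint32_t has_zero(uint32_t w) {
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

static int strcmp_words(const char *a, const char *b) {
    uint32_t wa, wb;
    for (;;) {
        memcpy(&wa, a, 4);           /* plays the role of the lw pairs */
        memcpy(&wb, b, 4);
        if (wa != wb || has_zero(wa))
            break;                   /* difference or terminator inside */
        a += 4;
        b += 4;
    }
    while (*a && *a == *b) { a++; b++; }  /* the worddiff/byteloop fallback */
    return (unsigned char)*a - (unsigned char)*b;
}

int main(void) {
    char a[8] = "mips", b[8] = "mipt";  /* padded so word loads stay in bounds */
    printf("%d\n", strcmp_words(a, b) < 0);  /* prints 1 */
    return 0;
}
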
View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
.set noreorder
.cpload t9
li v0, __NR_clock_nanosleep
@@ -16,5 +16,4 @@ ENTRY(___clock_nanosleep)
j t9
nop
.set reorder
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
.set noreorder
.cpload t9
li v0, __NR_rt_sigqueueinfo
@@ -16,5 +16,4 @@ ENTRY(___rt_sigqueueinfo)
j t9
nop
.set reorder
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
.set noreorder
.cpload t9
li v0, __NR_fgetxattr
@@ -16,5 +16,4 @@ ENTRY(___fgetxattr)
j t9
nop
.set reorder
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
.set noreorder
.cpload t9
li v0, __NR_flistxattr
@@ -16,5 +16,4 @@ ENTRY(___flistxattr)
j t9
nop
.set reorder
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)

View File

@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
.set noreorder
.cpload t9
li v0, __NR_fsetxattr
@@ -16,5 +16,4 @@ ENTRY(___fsetxattr)
j t9
nop
.set reorder
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)

View File

@@ -2,10 +2,10 @@
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
ENTRY(ftruncate)
.set noreorder
.cpload t9
li v0, __NR_process_vm_readv
li v0, __NR_ftruncate
syscall
bnez a3, 1f
move a0, v0
@@ -16,4 +16,4 @@ ENTRY(process_vm_readv)
j t9
nop
.set reorder
END(process_vm_readv)
END(ftruncate)

View File

@@ -1,19 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
.set noreorder
.cpload t9
li v0, __NR_process_vm_writev
syscall
bnez a3, 1f
move a0, v0
j ra
nop
1:
la t9,__set_errno_internal
j t9
nop
.set reorder
END(process_vm_writev)

View File

@@ -32,28 +32,29 @@
#include <unistd.h>
struct kernel_stat {
unsigned int st_dev;
unsigned int st_pad0[3];
unsigned long st_ino;
mode_t st_mode;
__u32 st_nlink;
uid_t st_uid;
gid_t st_gid;
unsigned int st_rdev;
unsigned int st_pad1[3];
__kernel_off_t st_size;
unsigned int _st_atime;
unsigned int st_atime_nsec;
unsigned int _st_mtime;
unsigned int st_mtime_nsec;
unsigned int _st_ctime;
unsigned int st_ctime_nsec;
unsigned int st_blksize;
unsigned int st_pad2;
unsigned long st_blocks;
unsigned int st_dev;
unsigned int st_pad0[3];
unsigned long st_ino;
mode_t st_mode;
__u32 st_nlink;
uid_t st_uid;
gid_t st_gid;
unsigned int st_rdev;
unsigned int st_pad1[3];
__kernel_off_t st_size;
unsigned int _st_atime;
unsigned int st_atime_nsec;
unsigned int _st_mtime;
unsigned int st_mtime_nsec;
unsigned int _st_ctime;
unsigned int st_ctime_nsec;
unsigned int st_blksize;
unsigned int st_pad2;
unsigned long st_blocks;
};
static void copy_stat(struct stat* st, struct kernel_stat* s) {
void copy_stat(struct stat *st, struct kernel_stat *s)
{
st->st_dev = static_cast<dev_t>(s->st_dev);
st->st_ino = static_cast<ino_t>(s->st_ino);
st->st_mode = static_cast<mode_t>(s->st_mode);
@@ -72,17 +73,30 @@ static void copy_stat(struct stat* st, struct kernel_stat* s) {
st->st_ctim.tv_nsec = static_cast<long>(s->st_ctime_nsec);
}
int fstat(int fp, struct stat* st) {
int fstat(int fp, struct stat *st)
{
kernel_stat s;
int ret = syscall(__NR_fstat, fp, &s);
copy_stat(st, &s);
int ret;
ret = syscall (__NR_fstat, fp, &s);
copy_stat (st, &s);
return ret;
}
__strong_alias(fstat64, fstat);
int fstatat(int dirfd, const char* pathname, struct stat* buf, int flags) {
int newfstatat(int dirfd, const char *pathname, struct stat *buf, int flags)
{
kernel_stat s;
int ret = syscall(__NR_newfstatat, dirfd, pathname, &s, flags);
int ret;
ret = syscall(__NR_newfstatat, dirfd, pathname, &s, flags);
copy_stat(buf, &s);
return ret;
}
int fstatat(int dirfd, const char *pathname, struct stat *buf, int flags)
{
kernel_stat s;
int ret;
ret = syscall(__NR_newfstatat, dirfd, pathname, &s, flags);
copy_stat(buf, &s);
return ret;
}

View File

@@ -46,12 +46,6 @@ LEAF(vfork,FRAMESZ)
PTR_SUBU sp, FRAMESZ
#endif
SETUP_GP64(a5, vfork)
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
rdhwr v0, $29 // v0 = tls
REG_L v0, REGSZ*1(v0) // v0 = v0[TLS_SLOT_THREAD_ID ie 1]
sw $0, REGSZ*2+4(v0) // v0->cached_pid_ = 0
LI a0, (CLONE_VM | CLONE_VFORK | SIGCHLD)
move a1, $0
move a2, $0

View File

@@ -5,11 +5,6 @@
#
libc_bionic_src_files_mips64 += \
arch-mips/string/memcmp.c \
arch-mips/string/memcpy.S \
arch-mips/string/memset.S \
arch-mips/string/strcmp.S \
arch-mips/string/strlen.c \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
@@ -17,6 +12,10 @@ libc_bionic_src_files_mips64 += \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
arch-mips/string/memcmp.c \
arch-mips/string/memcpy.c \
arch-mips/string/memset.c \
arch-mips/string/strlen.c \
libc_freebsd_src_files_mips64 += \
upstream-freebsd/lib/libc/string/wcscat.c \
@@ -35,6 +34,7 @@ libc_openbsd_src_files_mips64 += \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strcmp.c \
upstream-openbsd/lib/libc/string/strcpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \

View File

@@ -0,0 +1,423 @@
/*
* Copyright (c) 2009
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/************************************************************************
*
* memcpy.S
* Version: "043009"
*
************************************************************************/
/************************************************************************
* Include files
************************************************************************/
#include <private/bionic_asm.h>
/*
* This routine could be optimized for MIPS64. The current code only
* uses MIPS32 instructions.
*/
#if defined(__MIPSEB__)
# define LWHI lwl /* high part is left in big-endian */
# define SWHI swl /* high part is left in big-endian */
# define LWLO lwr /* low part is right in big-endian */
# define SWLO swr /* low part is right in big-endian */
#endif
#if defined(__MIPSEL__)
# define LWHI lwr /* high part is right in little-endian */
# define SWHI swr /* high part is right in little-endian */
# define LWLO lwl /* low part is left in big-endian */
# define SWLO swl /* low part is left in big-endian */
#endif
LEAF(memcpy,0)
.set noreorder
.set noat
/*
* Below we handle the case where memcpy is called with overlapping src and dst.
* Although memcpy is not required to handle this case, some parts of Android like Skia
* rely on such usage. We call memmove to handle such cases.
*/
subu t0,a0,a1
sra AT,t0,31
xor t1,t0,AT
subu t0,t1,AT
sltu AT,t0,a2
beq AT,zero,.Lmemcpy
la t9,memmove
jr t9
nop
.Lmemcpy:
slti AT,a2,8
bne AT,zero,.Llast8
move v0,a0 # memcpy returns the dst pointer
# Test if the src and dst are word-aligned, or can be made word-aligned
xor t8,a1,a0
andi t8,t8,0x3 # t8 is a0/a1 word-displacement
bne t8,zero,.Lunaligned
negu a3,a0
andi a3,a3,0x3 # we need to copy a3 bytes to make a0/a1 aligned
beq a3,zero,.Lchk16w # when a3=0 then the dst (a0) is word-aligned
subu a2,a2,a3 # now a2 is the remaining bytes count
LWHI t8,0(a1)
addu a1,a1,a3
SWHI t8,0(a0)
addu a0,a0,a3
# Now the dst/src are mutually word-aligned with word-aligned addresses
.Lchk16w:
andi t8,a2,0x3f # any whole 64-byte chunks?
# t8 is the byte count after 64-byte chunks
beq a2,t8,.Lchk8w # if a2==t8, no 64-byte chunks
# There will be at most 1 32-byte chunk after it
subu a3,a2,t8 # subtract from a2 the remainder
# Here a3 counts bytes in 16w chunks
addu a3,a0,a3 # Now a3 is the final dst after 64-byte chunks
addu t0,a0,a2 # t0 is the "past the end" address
# While in the loop we issue "pref 30,x(a0)", a0+x must not be past
# the "t0-32" address
# This means: for x=128 the last "safe" a0 address is "t0-160"
# Alternatively, for x=64 the last "safe" a0 address is "t0-96"
# In the current version we use "pref 30,128(a0)", so "t0-160" is the limit
subu t9,t0,160 # t9 is the "last safe pref 30,128(a0)" address
pref 0,0(a1) # bring the first line of src, addr 0
pref 0,32(a1) # bring the second line of src, addr 32
pref 0,64(a1) # bring the third line of src, addr 64
pref 30,32(a0) # safe, as we have at least 64 bytes ahead
# If a0 > t9, don't use "pref 30" at all
sgtu v1,a0,t9
bgtz v1,.Lloop16w # skip "pref 30,64(a0)" for too short arrays
nop
# otherwise, start with using pref30
pref 30,64(a0)
.Lloop16w:
pref 0,96(a1)
lw t0,0(a1)
bgtz v1,.Lskip_pref30_96 # skip "pref 30,96(a0)"
lw t1,4(a1)
pref 30,96(a0) # continue setting up the dest, addr 96
.Lskip_pref30_96:
lw t2,8(a1)
lw t3,12(a1)
lw t4,16(a1)
lw t5,20(a1)
lw t6,24(a1)
lw t7,28(a1)
pref 0,128(a1) # bring the next lines of src, addr 128
sw t0,0(a0)
sw t1,4(a0)
sw t2,8(a0)
sw t3,12(a0)
sw t4,16(a0)
sw t5,20(a0)
sw t6,24(a0)
sw t7,28(a0)
lw t0,32(a1)
bgtz v1,.Lskip_pref30_128 # skip "pref 30,128(a0)"
lw t1,36(a1)
pref 30,128(a0) # continue setting up the dest, addr 128
.Lskip_pref30_128:
lw t2,40(a1)
lw t3,44(a1)
lw t4,48(a1)
lw t5,52(a1)
lw t6,56(a1)
lw t7,60(a1)
pref 0, 160(a1) # bring the next lines of src, addr 160
sw t0,32(a0)
sw t1,36(a0)
sw t2,40(a0)
sw t3,44(a0)
sw t4,48(a0)
sw t5,52(a0)
sw t6,56(a0)
sw t7,60(a0)
addiu a0,a0,64 # adding 64 to dest
sgtu v1,a0,t9
bne a0,a3,.Lloop16w
addiu a1,a1,64 # adding 64 to src
move a2,t8
# Here we have src and dest word-aligned but less than 64-bytes to go
.Lchk8w:
pref 0, 0x0(a1)
andi t8,a2,0x1f # is there a 32-byte chunk?
# t8 is the remainder count past 32 bytes
beq a2,t8,.Lchk1w # when a2=t8, no 32-byte chunk
nop
lw t0,0(a1)
lw t1,4(a1)
lw t2,8(a1)
lw t3,12(a1)
lw t4,16(a1)
lw t5,20(a1)
lw t6,24(a1)
lw t7,28(a1)
addiu a1,a1,32
sw t0,0(a0)
sw t1,4(a0)
sw t2,8(a0)
sw t3,12(a0)
sw t4,16(a0)
sw t5,20(a0)
sw t6,24(a0)
sw t7,28(a0)
addiu a0,a0,32
.Lchk1w:
andi a2,t8,0x3 # now a2 is the remainder past 1w chunks
beq a2,t8,.Llast8
subu a3,t8,a2 # a3 is count of bytes in 1w chunks
addu a3,a0,a3 # now a3 is the dst address past the 1w chunks
# copying in words (4-byte chunks)
.LwordCopy_loop:
lw t3,0(a1) # the first t3 may be equal t0 ... optimize?
addiu a1,a1,4
addiu a0,a0,4
bne a0,a3,.LwordCopy_loop
sw t3,-4(a0)
# For the last (<8) bytes
.Llast8:
blez a2,.Lleave
addu a3,a0,a2 # a3 is the last dst address
.Llast8loop:
lb v1,0(a1)
addiu a1,a1,1
addiu a0,a0,1
bne a0,a3,.Llast8loop
sb v1,-1(a0)
.Lleave:
j ra
nop
#
# UNALIGNED case
#
.Lunaligned:
# got here with a3="negu a0"
andi a3,a3,0x3 # test if the a0 is word aligned
beqz a3,.Lua_chk16w
subu a2,a2,a3 # bytes left after initial a3 bytes
LWHI v1,0(a1)
LWLO v1,3(a1)
addu a1,a1,a3 # a3 may be here 1, 2 or 3
SWHI v1,0(a0)
addu a0,a0,a3 # below the dst will be word aligned (NOTE1)
.Lua_chk16w:
andi t8,a2,0x3f # any whole 64-byte chunks?
# t8 is the byte count after 64-byte chunks
beq a2,t8,.Lua_chk8w # if a2==t8, no 64-byte chunks
# There will be at most 1 32-byte chunk after it
subu a3,a2,t8 # subtract the remainder from a2
# Here a3 counts bytes in 16w chunks
addu a3,a0,a3 # Now a3 is the final dst after 64-byte chunks
addu t0,a0,a2 # t0 is the "past the end" address
subu t9,t0,160 # t9 is the "last safe pref 30,128(a0)" address
pref 0,0(a1) # bring the first line of src, addr 0
pref 0,32(a1) # bring the second line of src, addr 32
pref 0,64(a1) # bring the third line of src, addr 64
pref 30,32(a0) # safe, as we have at least 64 bytes ahead
# If a0 > t9, don't use "pref 30" at all
sgtu v1,a0,t9
bgtz v1,.Lua_loop16w # skip "pref 30,64(a0)" for too short arrays
nop
# otherwise, start with using pref30
pref 30,64(a0)
.Lua_loop16w:
pref 0,96(a1)
LWHI t0,0(a1)
LWLO t0,3(a1)
LWHI t1,4(a1)
bgtz v1,.Lua_skip_pref30_96
LWLO t1,7(a1)
pref 30,96(a0) # continue setting up the dest, addr 96
.Lua_skip_pref30_96:
LWHI t2,8(a1)
LWLO t2,11(a1)
LWHI t3,12(a1)
LWLO t3,15(a1)
LWHI t4,16(a1)
LWLO t4,19(a1)
LWHI t5,20(a1)
LWLO t5,23(a1)
LWHI t6,24(a1)
LWLO t6,27(a1)
LWHI t7,28(a1)
LWLO t7,31(a1)
pref 0,128(a1) # bring the next lines of src, addr 128
sw t0,0(a0)
sw t1,4(a0)
sw t2,8(a0)
sw t3,12(a0)
sw t4,16(a0)
sw t5,20(a0)
sw t6,24(a0)
sw t7,28(a0)
LWHI t0,32(a1)
LWLO t0,35(a1)
LWHI t1,36(a1)
bgtz v1,.Lua_skip_pref30_128
LWLO t1,39(a1)
pref 30,128(a0) # continue setting up the dest, addr 128
.Lua_skip_pref30_128:
LWHI t2,40(a1)
LWLO t2,43(a1)
LWHI t3,44(a1)
LWLO t3,47(a1)
LWHI t4,48(a1)
LWLO t4,51(a1)
LWHI t5,52(a1)
LWLO t5,55(a1)
LWHI t6,56(a1)
LWLO t6,59(a1)
LWHI t7,60(a1)
LWLO t7,63(a1)
pref 0, 160(a1) # bring the next lines of src, addr 160
sw t0,32(a0)
sw t1,36(a0)
sw t2,40(a0)
sw t3,44(a0)
sw t4,48(a0)
sw t5,52(a0)
sw t6,56(a0)
sw t7,60(a0)
addiu a0,a0,64 # adding 64 to dest
sgtu v1,a0,t9
bne a0,a3,.Lua_loop16w
addiu a1,a1,64 # adding 64 to src
move a2,t8
# Here we have src and dest word-aligned but less than 64-bytes to go
.Lua_chk8w:
pref 0, 0x0(a1)
andi t8,a2,0x1f # is there a 32-byte chunk?
# t8 is the remainder count past 32 bytes
beq a2,t8,.Lua_chk1w # when a2=t8, no 32-byte chunk
nop
LWHI t0,0(a1)
LWLO t0,3(a1)
LWHI t1,4(a1)
LWLO t1,7(a1)
LWHI t2,8(a1)
LWLO t2,11(a1)
LWHI t3,12(a1)
LWLO t3,15(a1)
LWHI t4,16(a1)
LWLO t4,19(a1)
LWHI t5,20(a1)
LWLO t5,23(a1)
LWHI t6,24(a1)
LWLO t6,27(a1)
LWHI t7,28(a1)
LWLO t7,31(a1)
addiu a1,a1,32
sw t0,0(a0)
sw t1,4(a0)
sw t2,8(a0)
sw t3,12(a0)
sw t4,16(a0)
sw t5,20(a0)
sw t6,24(a0)
sw t7,28(a0)
addiu a0,a0,32
.Lua_chk1w:
andi a2,t8,0x3 # now a2 is the remainder past 1w chunks
beq a2,t8,.Lua_smallCopy
subu a3,t8,a2 # a3 is count of bytes in 1w chunks
addu a3,a0,a3 # now a3 is the dst address past the 1w chunks
# copying in words (4-byte chunks)
.Lua_wordCopy_loop:
LWHI v1,0(a1)
LWLO v1,3(a1)
addiu a1,a1,4
addiu a0,a0,4 # note: dst=a0 is word aligned here, see NOTE1
bne a0,a3,.Lua_wordCopy_loop
sw v1,-4(a0)
# Now less than 4 bytes (value in a2) left to copy
.Lua_smallCopy:
beqz a2,.Lleave
addu a3,a0,a2 # a3 is the last dst address
.Lua_smallCopy_loop:
lb v1,0(a1)
addiu a1,a1,1
addiu a0,a0,1
bne a0,a3,.Lua_smallCopy_loop
sb v1,-1(a0)
j ra
nop
.set at
.set reorder
END(memcpy)
/************************************************************************
* Implementation : Static functions
************************************************************************/
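The overlap guard at the top of this file computes |a0 - a1|, the distance between dst and src, with a branch-free absolute value (sign-smear via sra, then xor and subtract) and tail-calls memmove whenever that distance is smaller than the byte count in a2. Below is a minimal C sketch of the same dispatch; the wrapper name is hypothetical, and the real check is just the four instructions before "la t9,memmove".

#include <stddef.h>
#include <string.h>

/* Hypothetical wrapper showing the dispatch memcpy performs:
 * if the copy windows overlap, fall through to memmove, which
 * is required to handle overlap; otherwise take the fast path. */
void *overlap_safe_copy(void *dst, const void *src, size_t n) {
  ptrdiff_t d = (char *)dst - (const char *)src;
  size_t dist = (size_t)(d < 0 ? -d : d);   /* |dst - src| */
  if (dist < n)
    return memmove(dst, src, n);            /* overlapping: be safe */
  return memcpy(dst, src, n);               /* disjoint: fast path */
}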


@@ -0,0 +1,242 @@
/*
* Copyright (c) 2009
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/************************************************************************
*
* memset.S, version "64h" with 1 cache line horizon for "pref 30" and 14 nops
* Version: "043009"
*
************************************************************************/
/************************************************************************
* Include files
************************************************************************/
#include <private/bionic_asm.h>
/*
* This routine could be optimized for MIPS64. The current code only
* uses MIPS32 instructions.
*/
#if defined(__MIPSEB__)
# define SWHI swl /* high part is left in big-endian */
# define SWLO swr /* low part is right in big-endian */
#endif
#if defined(__MIPSEL__)
# define SWHI swr /* high part is right in little-endian */
# define SWLO swl /* low part is left in little-endian */
#endif
#if !(defined(XGPROF) || defined(XPROF))
#undef SETUP_GP
#define SETUP_GP
#endif
#ifdef NDEBUG
#define DBG #
#else
#define DBG
#endif
LEAF(memset,0)
.set noreorder
.set noat
addu t0,a0,a2 # t0 is the "past the end" address
slti AT,a2,4 # is a2 less than 4?
bne AT,zero,.Llast4 # if yes, go to last4
move v0,a0 # memset returns the dst pointer
beq a1,zero,.Lset0
subu v1,zero,a0
# smear byte into 32 bit word
#if (__mips==32) && (__mips_isa_rev>=2)
ins a1, a1, 8, 8 # Replicate fill byte into half-word.
ins a1, a1, 16, 16 # Replicate fill byte into word.
#else
and a1,0xff
sll AT,a1,8
or a1,AT
sll AT,a1,16
or a1,AT
#endif
.Lset0:
andi v1,v1,0x3 # word-unaligned address?
beq v1,zero,.Laligned # v1 is the unalignment count
subu a2,a2,v1
SWHI a1,0(a0)
addu a0,a0,v1
# Here we have the "word-aligned" a0 (until the "last4")
.Laligned:
andi t8,a2,0x3f # any 64-byte chunks?
# t8 is the byte count past 64-byte chunks
beq a2,t8,.Lchk8w # when a2==t8, no 64-byte chunks
# There will be at most 1 32-byte chunk then
subu a3,a2,t8 # subtract the remainder from a2
# Here a3 counts bytes in 16w chunks
addu a3,a0,a3 # Now a3 is the final dst after 64-byte chunks
# Find out if there are any 64-byte chunks after which at least 96 bytes
# will still be left. The value "96" is the buffer needed for the
# "pref 30,64(a0)" prefetch, which can be used as "pref 30,0(a0)" after
# incrementing "a0" by 64.
# For "a2" below 160 there will be no such "pref 30 safe" 64-byte chunk.
#
sltiu v1,a2,160
bgtz v1,.Lloop16w_nopref30 # skip "pref 30,0(a0)"
subu t7,a2,96 # subtract "pref 30 unsafe" region
# below we have at least 1 64-byte chunk which is "pref 30 safe"
andi t6,t7,0x3f # t6 is the remainder past the "64-byte safe chunks"
subu t5,t7,t6 # subtract from t7 the reminder
# Here t5 counts bytes in 16w "safe" chunks
addu t4,a0,t5 # Now t4 is the dst after 64-byte "safe" chunks
# Don't use "pref 30,0(a0)" for a0 in the middle of a cache line
# pref 30,0(a0)
# Here we are in the region, where it is safe to use "pref 30,64(a0)"
.Lloop16w:
addiu a0,a0,64
pref 30,-32(a0) # continue setting up the dest, addr 64-32
sw a1,-64(a0)
sw a1,-60(a0)
sw a1,-56(a0)
sw a1,-52(a0)
sw a1,-48(a0)
sw a1,-44(a0)
sw a1,-40(a0)
sw a1,-36(a0)
nop
nop # the extra nop instructions help to balance
nop # cycles needed for "store" + "fill" + "evict"
nop # a 64-byte store needs 8 fill
nop # and 8 evict cycles, i.e. at least 32 instr.
nop
nop
pref 30,0(a0) # continue setting up the dest, addr 64-0
sw a1,-32(a0)
sw a1,-28(a0)
sw a1,-24(a0)
sw a1,-20(a0)
sw a1,-16(a0)
sw a1,-12(a0)
sw a1,-8(a0)
sw a1,-4(a0)
nop
nop
nop
nop # NOTE: adding 14 nop-s instead of 12 nop-s
nop # gives better results for "fast" memory
nop
bne a0,t4,.Lloop16w
nop
beq a0,a3,.Lchk8w # maybe no more 64-byte chunks?
nop # this delay slot is unused ...
.Lloop16w_nopref30: # there could be up to 3 "64-byte nopref30" chunks
addiu a0,a0,64
sw a1,-64(a0)
sw a1,-60(a0)
sw a1,-56(a0)
sw a1,-52(a0)
sw a1,-48(a0)
sw a1,-44(a0)
sw a1,-40(a0)
sw a1,-36(a0)
sw a1,-32(a0)
sw a1,-28(a0)
sw a1,-24(a0)
sw a1,-20(a0)
sw a1,-16(a0)
sw a1,-12(a0)
sw a1,-8(a0)
bne a0,a3,.Lloop16w_nopref30
sw a1,-4(a0)
.Lchk8w: # t8 here is the byte count past 64-byte chunks
andi t7,t8,0x1f # is there a 32-byte chunk?
# t7 is the remainder count past 32 bytes
beq t8,t7,.Lchk1w # when t8==t7, no 32-byte chunk
move a2,t7
sw a1,0(a0)
sw a1,4(a0)
sw a1,8(a0)
sw a1,12(a0)
sw a1,16(a0)
sw a1,20(a0)
sw a1,24(a0)
sw a1,28(a0)
addiu a0,a0,32
.Lchk1w:
andi t8,a2,0x3 # now t8 is the remainder past 1w chunks
beq a2,t8,.Llast4aligned
subu a3,a2,t8 # a3 is the count of bytes in 1w chunks
addu a3,a0,a3 # now a3 is the dst address past the 1w chunks
# copying in words (4-byte chunks)
.LwordCopy_loop:
addiu a0,a0,4
bne a0,a3,.LwordCopy_loop
sw a1,-4(a0)
# store last 0-3 bytes
# this will repeat the last store if the memset finishes on a word boundary
.Llast4aligned:
j ra
SWLO a1,-1(t0)
.Llast4:
beq a0,t0,.Llast4e
.Llast4l:
addiu a0,a0,1
bne a0,t0,.Llast4l
sb a1,-1(a0)
.Llast4e:
j ra
nop
.set at
.set reorder
END(memset)
/************************************************************************
* Implementation : Static functions
************************************************************************/
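Before its word loops, memset smears the fill byte across a 32-bit word: with MIPS32r2 it uses two ins instructions, otherwise the and/sll/or sequence above. The same smear as a small C sketch:

#include <stdint.h>

/* Replicate the low byte of c into all four bytes of a word,
 * mirroring the non-r2 "and/sll/or" sequence in memset. */
static uint32_t smear_byte(uint32_t c) {
  c &= 0xff;      /* and a1,0xff              */
  c |= c << 8;    /* sll AT,a1,8 ; or a1,AT   */
  c |= c << 16;   /* sll AT,a1,16; or a1,AT   */
  return c;
}

For example, smear_byte(0x5a) yields 0x5a5a5a5a, so each group of eight sw stores fills a 32-byte block per loop iteration.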


@@ -0,0 +1,148 @@
/*
* Copyright (c) 2010 MIPS Technologies, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with
* the distribution.
* * Neither the name of MIPS Technologies Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MIPS_STRING_OPS_H
#define __MIPS_STRING_OPS_H
/* This definition of the byte bitfields uses the
assumption that the layout of the bitfields is
equivalent to the layout in memory. Generally,
for the MIPS ABIs, this is true. If you compile
the strcmp.c file with -DSMOKE_TEST_NEW_STRCMP,
this assumption will be tested.
Also, regardless of char signedness, ANSI C dictates that
strcmp() treats each character as unsigned char. For
strlen and the like, signedness doesn't matter.
Also, this code assumes that there are 8 bits per 'char'. */
#if __mips64
typedef struct bits
{
unsigned long B0:8, B1:8, B2:8, B3:8, B4:8, B5:8, B6:8, B7:8;
} bits_t;
#else
typedef struct bits
{
unsigned long B0:8, B1:8, B2:8, B3:8;
} bits_t;
#endif
#ifndef _ULW
/* for MIPS GCC, there are no unaligned-access builtins - so this code
forces the compiler to treat the pointer access as unaligned. */
struct ulw
{
unsigned long b;
} __attribute__ ((packed));
#define _ULW(__x) ((struct ulw *) ((char *)(&__x)))->b;
#endif
/* This union assumes that small structures can be in registers. If
not, then memory accesses will be done - not optimal, but ok. */
typedef union
{
unsigned long v;
bits_t b;
} bitfields_t;
#ifndef detect_zero
/* __mips_dsp, __mips_dspr2, and __mips64 are predefined by
the compiler, based on command line options. */
#if (__mips_dsp || __mips_dspr2) && !__mips64
#define __mips_using_dsp 1
/* DSP 4-lane (8 unsigned bits per line) subtract and saturate
* Intrinsic operation. How this works:
* Given a 4-byte string of "ABC\0", subtract this as
* an unsigned integer from 0x01010101:
* 0x01010101
* - 0x41424300
* -----------
* 0xbfbebe01 <-- answer without saturation
* 0x00000001 <-- answer with saturation
* When this 4-lane vector is treated as an unsigned int value,
* a non-zero answer indicates the presence of a zero in the
* original 4-byte argument. */
typedef signed char v4i8 __attribute__ ((vector_size (4)));
#define detect_zero(__x,__y,__01s,__80s)\
((unsigned) __builtin_mips_subu_s_qb((v4i8) __01s,(v4i8) __x))
/* sets all 4 lanes to requested byte. */
#define set_byte_lanes(__x) ((unsigned) __builtin_mips_repl_qb(__x))
/* sets all 4 lanes to 0x01. */
#define def_and_set_01(__x) unsigned long __x = (unsigned) __builtin_mips_repl_qb(0x01)
/* sets all 4 lanes to 0x80. Not needed when subu_s.qb used. */
#define def_and_set_80(__x) /* do nothing */
#else
/* this version, originally published in the 80's, uses
a reverse-carry-set like determination of the zero byte.
The steps are, for __x = 0x31ff0001:
__x - _01s = 0x30fdff00
~__x = 0xce00fffe
((__x - _01s) & ~__x) = 0x0000ff00
x & _80s = 0x00008000 <- byte 3 was zero
Some implementations naively assume that characters are
always 7-bit unsigned ASCII. With that assumption, the
"& ~x" is usually discarded. Since character strings
are 8-bit, the and is needed to catch the case of
a false positive when the byte is 0x80. */
#define detect_zero(__x,__y,_01s,_80s)\
((unsigned) (((__x) - _01s) & ~(__x)) & _80s)
#if __mips64
#define def_and_set_80(__x) unsigned long __x = 0x8080808080808080ul
#define def_and_set_01(__x) unsigned long __x = 0x0101010101010101ul
#else
#define def_and_set_80(__x) unsigned long __x = 0x80808080ul
#define def_and_set_01(__x) unsigned long __x = 0x01010101ul
#endif
#endif
#endif
/* dealing with 'void *' conversions without using extra variables. */
#define get_byte(__x,__idx) (((unsigned char *) (__x))[__idx])
#define set_byte(__x,__idx,__fill) ((unsigned char *) (__x))[__idx] = (__fill)
#define get_word(__x,__idx) (((unsigned long *) (__x))[__idx])
#define set_word(__x,__idx,__fill) ((unsigned long *) (__x))[__idx] = (__fill)
#define inc_ptr_as(__type,__x,__inc) __x = (void *) (((__type) __x) + (__inc))
#define cvt_ptr_to(__type,__x) ((__type) (__x))
#endif
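The non-DSP detect_zero above relies on the classic borrow trick: (x - 0x01010101) & ~x & 0x80808080 is non-zero exactly when some byte of x is zero, and the "& ~x" term suppresses false positives from bytes whose top bit is already set. A small self-test of that formula, with values matching the walkthrough in the comment:

#include <stdint.h>
#include <stdio.h>

/* The non-DSP detect_zero, written out as a plain function. */
static uint32_t has_zero_byte(uint32_t x) {
  return (x - 0x01010101u) & ~x & 0x80808080u;
}

int main(void) {
  printf("%08x\n", has_zero_byte(0x31ff0001u)); /* 00008000: contains a zero byte */
  printf("%08x\n", has_zero_byte(0x41424344u)); /* 00000000: "ABCD", no zero byte */
  printf("%08x\n", has_zero_byte(0xffffffffu)); /* 00000000: 0xff is not zero     */
  return 0;
}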


@@ -0,0 +1,224 @@
/*
* Copyright (c) 2010 MIPS Technologies, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with
* the distribution.
* * Neither the name of MIPS Technologies Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include "mips-string-ops.h"
#define do_strlen_word(__av) {\
if (detect_zero(x,x,_01s,_80s)) break;\
x = __av;\
cnt += sizeof (unsigned);\
}
#define do_strlen_byte(__x) {\
if ((bx.b.B##__x) == 0) break;\
++cnt;\
}
#if SMOKE_TEST_MIPS_STRLEN
#define strlen my_strlen
#endif
size_t
strlen (const char *_a)
{
int cnt = 0;
unsigned long x;
/* align the string to word boundary so we can do word at a time. */
if ((cvt_ptr_to (unsigned long, _a) & (sizeof (unsigned long) - 1)) != 0)
{
if ((cvt_ptr_to (unsigned long, _a) & 1) != 0)
{
if (get_byte (_a, 0) == 0)
return cnt;
/* advance one byte so the 2-byte check below runs on an even address. */
inc_ptr_as (char *, _a, 1);
++cnt;
}
if ((cvt_ptr_to (unsigned long, _a) & 2) != 0)
{
if (get_byte (_a, 0) == 0)
return cnt + 0;
if (get_byte (_a, 1) == 0)
return cnt + 1;
inc_ptr_as (char *, _a, 2);
cnt += 2;
}
}
#if __mips64
#error strlen: mips64 check for 4-byte alignment not implemented.
#endif
if (1)
{
def_and_set_01 (_01s);
def_and_set_80 (_80s);
/* as advantageous as it is to performance, this code cannot pre-load
the following word, nor can it prefetch the next line at the start
of the loop since the string can be at the end of a page with the
following page unmapped. There are tests in the suite to catch
any attempt to go beyond the current word. */
x = get_word (_a, 0);
while (1)
{
/* doing 8 words should cover most strings. */
do_strlen_word (get_word (_a, 1));
do_strlen_word (get_word (_a, 2));
do_strlen_word (get_word (_a, 3));
do_strlen_word (get_word (_a, 4));
do_strlen_word (get_word (_a, 5));
do_strlen_word (get_word (_a, 6));
do_strlen_word (get_word (_a, 7));
do_strlen_word (get_word (_a, 8));
inc_ptr_as (unsigned long*, _a, 8);
}
}
while (1)
{
/* pull apart the last word processed and find the zero. */
bitfields_t bx;
bx.v = x;
#if __mips64
do_strlen_byte (0);
do_strlen_byte (1);
do_strlen_byte (2);
do_strlen_byte (3);
do_strlen_byte (4);
do_strlen_byte (5);
do_strlen_byte (6);
#else
do_strlen_byte (0);
do_strlen_byte (1);
do_strlen_byte (2);
#endif
/* last byte is zero */
break;
}
return cnt;
}
#undef do_strlen_byte
#undef do_strlen_word
#if SMOKE_TEST_MIPS_STRLEN
#include <stdio.h>
char str1[] = "DHRYSTONE PROGRAM, 1'ST STRING";
char str2[] = "DHRYSTONE PROGRAM, 2'ST STRING";
char str3[] = "another string";
char str4[] = "another";
char str5[] = "somes tring";
char str6[] = "somes_tring";
char str7[16], str8[16];
static char *
chk (unsigned long mine, unsigned long libs, int *errors)
{
static char answer[1024];
char *result = mine == libs ? "PASS" : "FAIL";
sprintf (answer, "new_strlen=%lu: lib_strlen=%lu: %s!", mine, libs, result);
if (mine != libs)
(*errors)++;
return answer;
}
int
main (int argc, char **argv)
{
int errors = 0;
/* set -1 in one position */
str6[5] = 0xff;
/* set zero in same position with junk in following 3 */
str7[0] = str8[0] = 0;
str7[1] = 0xff;
str7[2] = 'a';
str7[3] = 2;
str8[1] = 's';
str8[2] = -2;
str8[3] = 0;
fprintf (stderr, "========== mips_strlen%s test...\n",
argv[0] ? argv[0] : "unknown strlen");
#define P(__x,__y) {\
int a = my_strlen(__x + __y);\
int b = (strlen)(__x + __y) /* library version */;\
fprintf(stderr,"%s+%d: %s\n",#__x,__y,chk(a,b,&errors));\
}
P (str1, 0);
P (str1, 1);
P (str1, 2);
P (str1, 3);
P (str2, 0);
P (str2, 1);
P (str2, 2);
P (str2, 3);
P (str3, 0);
P (str3, 1);
P (str3, 2);
P (str3, 3);
P (str4, 0);
P (str4, 1);
P (str4, 2);
P (str4, 3);
P (str5, 0);
P (str5, 1);
P (str5, 2);
P (str5, 3);
P (str6, 0);
P (str6, 1);
P (str6, 2);
P (str6, 3);
P (str7, 0);
P (str7, 1);
P (str7, 2);
P (str7, 3);
P (str8, 0);
P (str8, 1);
P (str8, 2);
P (str8, 3);
return errors;
}
#endif
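For reference, the alignment prologue in strlen above can be collapsed into a plain byte loop: peel bytes (returning early if the terminator appears) until the pointer reaches a word boundary, then let the word loop take over. A hedged restatement; the helper below is hypothetical and trades the original 1-byte/2-byte structure for simplicity:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical restatement of the prologue: returns nonzero if the
 * terminator was found in the unaligned head (with *cnt holding the
 * length so far), else leaves *p word-aligned and *cnt advanced. */
static int align_head(const char **p, size_t *cnt) {
  while (((uintptr_t)*p & (sizeof(unsigned long) - 1)) != 0) {
    if (**p == '\0')
      return 1;       /* string ended inside the head */
    ++*p;
    ++*cnt;
  }
  return 0;           /* *p now word-aligned; the word loop can run */
}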


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
.set push
.set noreorder
li v0, __NR_clock_nanosleep
@@ -22,5 +22,5 @@ ENTRY(___clock_nanosleep)
j t9
move ra, t0
.set pop
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)
.hidden __clock_nanosleep


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
.set push
.set noreorder
li v0, __NR_rt_sigqueueinfo
@@ -22,5 +22,5 @@ ENTRY(___rt_sigqueueinfo)
j t9
move ra, t0
.set pop
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)
.hidden __rt_sigqueueinfo


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
.set push
.set noreorder
li v0, __NR_fgetxattr
@@ -22,5 +22,4 @@ ENTRY(___fgetxattr)
j t9
move ra, t0
.set pop
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
.set push
.set noreorder
li v0, __NR_flistxattr
@@ -22,5 +22,4 @@ ENTRY(___flistxattr)
j t9
move ra, t0
.set pop
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
.set push
.set noreorder
li v0, __NR_fsetxattr
@@ -22,5 +22,4 @@ ENTRY(___fsetxattr)
j t9
move ra, t0
.set pop
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)


@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
.set push
.set noreorder
li v0, __NR_process_vm_readv
syscall
bnez a3, 1f
move a0, v0
j ra
nop
1:
move t0, ra
bal 2f
nop
2:
.cpsetup ra, t1, 2b
LA t9,__set_errno_internal
.cpreturn
j t9
move ra, t0
.set pop
END(process_vm_readv)


@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
.set push
.set noreorder
li v0, __NR_process_vm_writev
syscall
bnez a3, 1f
move a0, v0
j ra
nop
1:
move t0, ra
bal 2f
nop
2:
.cpsetup ra, t1, 2b
LA t9,__set_errno_internal
.cpreturn
j t9
move ra, t0
.set pop
END(process_vm_writev)


@@ -34,12 +34,6 @@ ENTRY(vfork)
popl %ecx // Grab the return address.
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ecx, 0
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
movl %gs:0, %eax
movl 4(%eax), %eax
movl $0, 12(%eax)
movl $__NR_vfork, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -33,5 +33,4 @@ ENTRY(___clock_nanosleep)
popl %ecx
popl %ebx
ret
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -28,5 +28,4 @@ ENTRY(___rt_sigqueueinfo)
popl %ecx
popl %ebx
ret
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(__clock_gettime)
ENTRY(clock_gettime)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -23,4 +23,4 @@ ENTRY(__clock_gettime)
popl %ecx
popl %ebx
ret
END(__clock_gettime)
END(clock_gettime)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -33,5 +33,4 @@ ENTRY(___fgetxattr)
popl %ecx
popl %ebx
ret
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -28,5 +28,4 @@ ENTRY(___flistxattr)
popl %ecx
popl %ebx
ret
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -38,5 +38,4 @@ ENTRY(___fsetxattr)
popl %ecx
popl %ebx
ret
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)


@@ -0,0 +1,26 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(ftruncate)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
pushl %ecx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ecx, 0
mov 12(%esp), %ebx
mov 16(%esp), %ecx
movl $__NR_ftruncate, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax
jb 1f
negl %eax
pushl %eax
call __set_errno_internal
addl $4, %esp
1:
popl %ecx
popl %ebx
ret
END(ftruncate)
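All of these generated stubs share one error convention: the kernel returns a negative errno in [-MAX_ERRNO, -1], which the stub detects (cmpl $-MAX_ERRNO / jb 1f here), negates, and passes to __set_errno_internal, which stores errno and yields the -1 return value. Roughly, in C, assuming a raw syscall primitive that returns the kernel's value unmodified:

#include <errno.h>

#define MAX_ERRNO 4095  /* upper bound of the kernel's error range */

/* Sketch of what each generated stub does with the raw return value. */
static long fixup_syscall_result(long rv) {
  if ((unsigned long)rv >= (unsigned long)-MAX_ERRNO) {
    errno = (int)-rv;   /* what __set_errno_internal stores */
    return -1;
  }
  return rv;            /* success: pass the value through */
}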


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(__gettimeofday)
ENTRY(gettimeofday)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -23,4 +23,4 @@ ENTRY(__gettimeofday)
popl %ecx
popl %ebx
ret
END(__gettimeofday)
END(gettimeofday)


@@ -1,46 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
pushl %ecx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ecx, 0
pushl %edx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edx, 0
pushl %esi
.cfi_adjust_cfa_offset 4
.cfi_rel_offset esi, 0
pushl %edi
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edi, 0
pushl %ebp
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ebp, 0
mov 28(%esp), %ebx
mov 32(%esp), %ecx
mov 36(%esp), %edx
mov 40(%esp), %esi
mov 44(%esp), %edi
mov 48(%esp), %ebp
movl $__NR_process_vm_readv, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax
jb 1f
negl %eax
pushl %eax
call __set_errno_internal
addl $4, %esp
1:
popl %ebp
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
ret
END(process_vm_readv)


@@ -1,46 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
pushl %ecx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ecx, 0
pushl %edx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edx, 0
pushl %esi
.cfi_adjust_cfa_offset 4
.cfi_rel_offset esi, 0
pushl %edi
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edi, 0
pushl %ebp
.cfi_adjust_cfa_offset 4
.cfi_rel_offset ebp, 0
mov 28(%esp), %ebx
mov 32(%esp), %ecx
mov 36(%esp), %edx
mov 40(%esp), %esi
mov 44(%esp), %edi
mov 48(%esp), %ebp
movl $__NR_process_vm_writev, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax
jb 1f
negl %eax
pushl %eax
call __set_errno_internal
addl $4, %esp
1:
popl %ebp
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
ret
END(process_vm_writev)


@@ -32,12 +32,6 @@
ENTRY(vfork)
popq %rdi // Grab the return address.
// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
mov %fs:0, %rax
mov 8(%rax), %rax
movl $0, 20(%rax)
movl $__NR_vfork, %eax
syscall
pushq %rdi // Restore the return address.


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___clock_nanosleep)
ENTRY(__clock_nanosleep)
movq %rcx, %r10
movl $__NR_clock_nanosleep, %eax
syscall
@@ -13,5 +13,5 @@ ENTRY(___clock_nanosleep)
call __set_errno_internal
1:
ret
END(___clock_nanosleep)
.hidden ___clock_nanosleep
END(__clock_nanosleep)
.hidden __clock_nanosleep


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___rt_sigqueueinfo)
ENTRY(__rt_sigqueueinfo)
movl $__NR_rt_sigqueueinfo, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -12,5 +12,5 @@ ENTRY(___rt_sigqueueinfo)
call __set_errno_internal
1:
ret
END(___rt_sigqueueinfo)
.hidden ___rt_sigqueueinfo
END(__rt_sigqueueinfo)
.hidden __rt_sigqueueinfo


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fgetxattr)
ENTRY(fgetxattr)
movq %rcx, %r10
movl $__NR_fgetxattr, %eax
syscall
@@ -13,5 +13,4 @@ ENTRY(___fgetxattr)
call __set_errno_internal
1:
ret
END(___fgetxattr)
.hidden ___fgetxattr
END(fgetxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___flistxattr)
ENTRY(flistxattr)
movl $__NR_flistxattr, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -12,5 +12,4 @@ ENTRY(___flistxattr)
call __set_errno_internal
1:
ret
END(___flistxattr)
.hidden ___flistxattr
END(flistxattr)


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(___fsetxattr)
ENTRY(fsetxattr)
movq %rcx, %r10
movl $__NR_fsetxattr, %eax
syscall
@@ -13,5 +13,4 @@ ENTRY(___fsetxattr)
call __set_errno_internal
1:
ret
END(___fsetxattr)
.hidden ___fsetxattr
END(fsetxattr)


@@ -1,16 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_readv)
movq %rcx, %r10
movl $__NR_process_vm_readv, %eax
syscall
cmpq $-MAX_ERRNO, %rax
jb 1f
negl %eax
movl %eax, %edi
call __set_errno_internal
1:
ret
END(process_vm_readv)


@@ -1,16 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */
#include <private/bionic_asm.h>
ENTRY(process_vm_writev)
movq %rcx, %r10
movl $__NR_process_vm_writev, %eax
syscall
cmpq $-MAX_ERRNO, %rax
jb 1f
negl %eax
movl %eax, %edi
call __set_errno_internal
1:
ret
END(process_vm_writev)


@@ -1,47 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#undef _FORTIFY_SOURCE
#include <stdio.h>
#include <sys/cdefs.h>
#include "private/libc_logging.h"
extern "C" size_t __fread_chk(void * __restrict buf, size_t size, size_t count,
FILE * __restrict stream, size_t buf_size) {
size_t total;
if (__predict_false(__size_mul_overflow(size, count, &total))) {
// overflow: trigger the error path in fread
return fread(buf, size, count, stream);
}
if (__predict_false(total > buf_size)) {
__fortify_chk_fail("fread: prevented write past end of buffer", 0);
}
return fread(buf, size, count, stream);
}
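__fread_chk is the _FORTIFY_SOURCE interceptor for fread: it first guards the size*count multiplication, deferring to fread's own failure path on overflow, and only aborts when the product would exceed the buffer size the compiler could see. A hedged sketch of the same two checks using the GCC/Clang overflow builtin (bionic's __size_mul_overflow is its own helper; the builtin stands in for it here only by analogy):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the fortify checks; buf_size is the compiler-known
 * size of buf, as passed in by the __fread_chk call site. */
size_t fread_checked(void *buf, size_t size, size_t count,
                     FILE *stream, size_t buf_size) {
  size_t total;
  if (__builtin_mul_overflow(size, count, &total))
    return fread(buf, size, count, stream);  /* let fread report the error */
  if (total > buf_size)
    abort();  /* stands in for __fortify_chk_fail */
  return fread(buf, size, count, stream);
}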

Some files were not shown because too many files have changed in this diff.