Merge commit '52e7d3d91ab6a5bab77c5dfb1ed47381fd52f9ba' into mm

Conflicts:
	libc/Android.mk
	libc/arch-arm/include/machine/cpu-features.h
	libc/bionic/pthread.c
	libc/bionic/semaphore.c

Change-Id: I599b069b71e381f8beb6c06acfde86b15e049273
Author: Brian Carlstrom
Date:   2010-08-17 15:12:33 -07:00
Commit: 019c5d920a
360 changed files with 19530 additions and 830 deletions

ThirdPartyProject.prop (new file, 10 lines added)

@ -0,0 +1,10 @@
# Copyright 2010 Google Inc. All Rights Reserved.
#Fri Jul 16 10:03:08 PDT 2010
currentVersion=Unknown
version=Unknown
isNative=true
feedurl=http\://www.openbsd.org/security.html
name=openbsd
keywords=openbsd
onDevice=true
homepage=http\://openbsd.org


@ -14,6 +14,7 @@ libc_common_src_files := \
unistd/exec.c \
unistd/fcntl.c \
unistd/fnmatch.c \
unistd/fstatfs.c \
unistd/ftime.c \
unistd/ftok.c \
unistd/getcwd.c \
@ -175,7 +176,6 @@ libc_common_src_files := \
stdlib/tolower_.c \
stdlib/toupper_.c \
stdlib/wchar.c \
string/bcopy.c \
string/index.c \
string/memccpy.c \
string/memchr.c \
@ -186,7 +186,6 @@ libc_common_src_files := \
string/strcasestr.c \
string/strcat.c \
string/strchr.c \
string/strcmp.c \
string/strcoll.c \
string/strcpy.c \
string/strcspn.c \
@ -196,7 +195,6 @@ libc_common_src_files := \
string/strlcat.c \
string/strlcpy.c \
string/strncat.c \
string/strncmp.c \
string/strncpy.c \
string/strndup.c \
string/strnlen.c \
@ -208,6 +206,34 @@ libc_common_src_files := \
string/strtok.c \
string/strtotimeval.c \
string/strxfrm.c \
wchar/wcpcpy.c \
wchar/wcpncpy.c \
wchar/wcscasecmp.c \
wchar/wcscat.c \
wchar/wcschr.c \
wchar/wcscmp.c \
wchar/wcscoll.c \
wchar/wcscpy.c \
wchar/wcscspn.c \
wchar/wcsdup.c \
wchar/wcslcat.c \
wchar/wcslcpy.c \
wchar/wcslen.c \
wchar/wcsncasecmp.c \
wchar/wcsncat.c \
wchar/wcsncmp.c \
wchar/wcsncpy.c \
wchar/wcsnlen.c \
wchar/wcspbrk.c \
wchar/wcsrchr.c \
wchar/wcsspn.c \
wchar/wcsstr.c \
wchar/wcstok.c \
wchar/wcswidth.c \
wchar/wmemchr.c \
wchar/wmemcpy.c \
wchar/wmemmove.c \
wchar/wmemset.c \
inet/bindresvport.c \
inet/inet_addr.c \
inet/inet_aton.c \
@ -292,7 +318,6 @@ libc_common_src_files := \
# =========================================================
ifeq ($(TARGET_ARCH),arm)
libc_common_src_files += \
bionic/eabi.c \
bionic/bionic_clone.c \
arch-arm/bionic/__get_pc.S \
arch-arm/bionic/__get_sp.S \
@ -300,6 +325,7 @@ libc_common_src_files += \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/atomics_arm.S \
arch-arm/bionic/clone.S \
arch-arm/bionic/eabi.c \
arch-arm/bionic/ffs.S \
arch-arm/bionic/kill.S \
arch-arm/bionic/libgcc_compat.c \
@ -313,6 +339,9 @@ libc_common_src_files += \
arch-arm/bionic/strlen.c.arm \
arch-arm/bionic/syscall.S \
string/memmove.c.arm \
string/bcopy.c \
string/strcmp.c \
string/strncmp.c \
unistd/socketcalls.c
# These files need to be arm so that gdbserver
@ -320,6 +349,7 @@ libc_common_src_files += \
# up any thumb code.
libc_common_src_files += \
bionic/pthread.c.arm \
bionic/pthread-rwlocks.c.arm \
bionic/pthread-timers.c.arm \
bionic/ptrace.c.arm
@ -344,13 +374,17 @@ libc_common_src_files += \
arch-x86/bionic/_setjmp.S \
arch-x86/bionic/vfork.S \
arch-x86/bionic/syscall.S \
arch-x86/string/bzero.S \
arch-x86/string/memset.S \
arch-x86/string/memcmp.S \
arch-x86/string/memcpy.S \
arch-x86/string/bcopy_wrapper.S \
arch-x86/string/memcpy_wrapper.S \
arch-x86/string/memmove_wrapper.S \
arch-x86/string/bzero_wrapper.S \
arch-x86/string/memcmp_wrapper.S \
arch-x86/string/memset_wrapper.S \
arch-x86/string/strcmp_wrapper.S \
arch-x86/string/strncmp_wrapper.S \
arch-x86/string/strlen.S \
string/memmove.c \
bionic/pthread.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
bionic/ptrace.c
@ -381,10 +415,13 @@ libc_common_src_files += \
arch-sh/bionic/__set_tls.c \
arch-sh/bionic/__get_tls.c \
arch-sh/bionic/ffs.S \
string/bcopy.c \
string/strcmp.c \
string/strncmp.c \
string/memcmp.c \
string/strlen.c \
bionic/eabi.c \
bionic/pthread.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
bionic/ptrace.c \
unistd/socketcalls.c
@ -403,7 +440,6 @@ libc_common_cflags := \
-D_LIBC=1 \
-DSOFTFLOAT \
-DFLOATING_POINT \
-DNEED_PSELECT=1 \
-DINET6 \
-I$(LOCAL_PATH)/private \
-DUSE_DL_PREFIX \
@ -437,6 +473,10 @@ ifeq ($(TARGET_ARCH),arm)
else # !arm
ifeq ($(TARGET_ARCH),x86)
libc_crt_target_cflags := -m32
# Enable recent IA-friendly memory routines (such as for Atom).
# These will not work on earlier x86 machines.
libc_common_cflags += -mtune=i686 -DUSE_SSSE3 -DUSE_SSE2
endif # x86
endif # !arm
@ -447,6 +487,10 @@ else
libc_common_cflags += -DANDROID_SMP=0
endif
# Needed to access private/__dso_handle.S from
# crtbegin_xxx.S and crtend_xxx.S
#
libc_crt_target_cflags += -I$(LOCAL_PATH)/private
# Define some common includes
# ========================================================
@ -461,10 +505,17 @@ libc_common_c_includes := \
# executables)
# ==========================================================================
ifeq ($(TARGET_ARCH),x86)
# we only need begin_so/end_so for x86, since it needs an appropriate .init
# section in the shared library with a function to call all the entries in
# .ctors section. ARM uses init_array, and does not need the function.
ifneq ($(filter arm x86,$(TARGET_ARCH)),)
# ARM and x86 need crtbegin_so/crtend_so.
#
# For x86, the .init section must point to a function that calls all
# entries in the .ctors section. (on ARM this is done through the
# .init_array section instead).
#
# For both platforms, the .fini_array section must point to a function
# that will call __cxa_finalize(&__dso_handle) in order to ensure that
# static C++ destructors are properly called on dlclose().
#
GEN := $(TARGET_OUT_STATIC_LIBRARIES)/crtbegin_so.o
$(GEN): $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin_so.S
@mkdir -p $(dir $@)
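The comment above spells out the teardown contract: on dlclose(), the .fini_array entry installed by crtbegin_so must call __cxa_finalize(&__dso_handle). A hedged C sketch of that chain (illustrative only, not the shipped crtbegin_so source):

/* Declarations follow the Itanium C++ ABI. __cxa_finalize runs every
 * destructor that was registered against this DSO's __dso_handle. */
extern void  __cxa_finalize(void *dso_handle);
extern void *__dso_handle;

/* Conceptually, the first entry in the shared library's .fini_array: */
static void on_dlclose(void) {
    __cxa_finalize((void *) &__dso_handle);
}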


@ -120,7 +120,7 @@ int fsync(int) 118
int fchown:fchown32(int, uid_t, gid_t) 207
void sync(void) 36
int __fcntl64:fcntl64(int, int, void *) 221
int fstatfs:fstatfs64(int, size_t, struct statfs *) 267,269
int __fstatfs64:fstatfs64(int, size_t, struct statfs *) 267,269
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count) 187
int fstatat:fstatat64(int dirfd, const char *path, struct stat *buf, int flags) 327,300
int mkdirat(int dirfd, const char *pathname, mode_t mode) 323,296
@ -241,6 +241,7 @@ int __syslog:syslog(int, char *, int) 103
int init_module(void *, unsigned long, const char *) 128
int delete_module(const char*, unsigned int) 129
int klogctl:syslog(int, char *, int) 103
int sysinfo(struct sysinfo *) 116
# futex
int futex(void *, int, int, void *, void *, int) 240
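Renaming the stub from fstatfs to __fstatfs64 lets a small C wrapper (the unistd/fstatfs.c entry added to libc_common_src_files above) supply the structure size that the raw fstatfs64 syscall expects. A hedged sketch of such a wrapper, not necessarily the exact code in this change:

#include <stddef.h>
#include <sys/vfs.h>                 /* struct statfs */

extern int __fstatfs64(int fd, size_t size, struct statfs *buf);

int fstatfs(int fd, struct statfs *buf) {
    /* The kernel's fstatfs64 takes the buffer size explicitly. */
    return __fstatfs64(fd, sizeof(*buf), buf);
}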


@ -3,6 +3,7 @@
/*
* Copyright (c) 1997 Mark Brinicombe
* Copyright (c) 2010 Android Open Source Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,6 +36,7 @@
#include <machine/asm.h>
#include <machine/setjmp.h>
#include <machine/cpu-features.h>
/*
* C library -- _setjmp, _longjmp
@ -51,18 +53,20 @@
ENTRY(_setjmp)
ldr r1, .L_setjmp_magic
str r1, [r0], #4
#ifdef SOFTFLOAT
add r0, r0, #52
#else
/* Store fp registers */
sfm f4, 4, [r0], #48
/* Store fpsr */
rfs r1
str r1, [r0], #0x0004
#endif /* SOFTFLOAT */
/* Store integer registers */
stmia r0, {r4-r14}
str r1, [r0, #(_JB_MAGIC * 4)]
/* Store core registers */
add r1, r0, #(_JB_CORE_BASE * 4)
stmia r1, {r4-r14}
#ifdef __ARM_HAVE_VFP
/* Store floating-point registers */
add r1, r0, #(_JB_FLOAT_BASE * 4)
vstmia r1, {d8-d15}
/* Store floating-point state */
fmrx r1, fpscr
str r1, [r0, #(_JB_FLOAT_STATE * 4)]
#endif /* __ARM_HAVE_VFP */
mov r0, #0x00000000
bx lr
@ -72,21 +76,22 @@ ENTRY(_setjmp)
ENTRY(_longjmp)
ldr r2, .L_setjmp_magic
ldr r3, [r0], #4
ldr r3, [r0, #(_JB_MAGIC * 4)]
teq r2, r3
bne botch
#ifdef SOFTFLOAT
add r0, r0, #52
#else
/* Restore fp registers */
lfm f4, 4, [r0], #48
/* Restore fpsr */
ldr r4, [r0], #0x0004
wfs r4
#endif /* SOFTFLOAT */
/* Restore integer registers */
ldmia r0, {r4-r14}
#ifdef __ARM_HAVE_VFP
/* Restore floating-point registers */
add r2, r0, #(_JB_FLOAT_BASE * 4)
vldmia r2, {d8-d15}
/* Restore floating-point state */
ldr r2, [r0, #(_JB_FLOAT_STATE * 4)]
fmxr fpscr, r2
#endif /* __ARM_HAVE_VFP */
/* Restore core registers */
add r2, r0, #(_JB_CORE_BASE * 4)
ldmia r2, {r4-r14}
/* Validate sp and r14 */
teq sp, #0
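The magic word written at _JB_MAGIC lets _longjmp reject buffers that were not produced by _setjmp; note that, unlike setjmp, no signal mask is saved. A minimal usage sketch in plain C (function names are hypothetical):

#include <setjmp.h>

static jmp_buf env;

/* _setjmp/_longjmp skip the signal-mask save/restore that setjmp/longjmp
 * perform, so they are cheaper when the caller does not care about it. */
int run_guarded(void (*fn)(void)) {
    if (_setjmp(env) != 0)
        return -1;              /* reached via _longjmp() below */
    fn();
    return 0;
}

void bail_out(void) {
    _longjmp(env, 1);           /* resumes after the _setjmp() call above */
}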


@ -31,7 +31,7 @@
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# any executable that is dynamically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
@ -63,13 +63,7 @@ _start:
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
@ -90,3 +84,4 @@ __FINI_ARRAY__:
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -0,0 +1,55 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
# Implement static C++ destructors when the shared
# library is unloaded through dlclose().
#
# A call to this function must be the first entry
# in the .fini_array. See 3.3.5.3.C of C++ ABI
# standard.
#
__on_dlclose:
adr r0, 0f
ldr r0, [r0]
b __cxa_finalize
0:
.long __dso_handle
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.long __on_dlclose
#include "__dso_handle.S"


@ -63,13 +63,7 @@ _start:
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
@ -90,3 +84,5 @@ __FINI_ARRAY__:
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -0,0 +1,38 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* This is the same as crtend.S except that a shared library
* cannot have a .preinit_array
*/
.section .init_array, "aw"
.long 0
.section .fini_array, "aw"
.long 0


@ -30,7 +30,21 @@
extern int __cxa_atexit(void (*)(void*), void*, void* );
void* __dso_handle = 0;
/* Temporary hack: this variable should not be part of the C library
* itself, but placed in the .bss section of each executable or
* shared library instead.
*
* We keep it here temporarily until the build system has been
* modified properly to use crtbegin_so.S and crtend_so.S when
* generating shared libraries.
*
* It must be a 'weak' symbol to avoid conflicts with the definitions
* that have been moved to crtbegin_static.S and crtbegin_dynamic.S
*
* For the record, it is used for static C++ object construction
* and destruction. See http://www.codesourcery.com/public/cxx-abi/abi.html#dso-dtor
*/
void* __attribute__((weak)) __dso_handle;
/* The "C++ ABI for ARM" document states that static C++ constructors,
* which are called from the .init_array, should manually call
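For context, a hedged sketch of how __dso_handle is consumed: when a translation unit defines a static object with a destructor, the compiler registers that destructor against the containing DSO through __cxa_atexit (names per the Itanium/ARM C++ ABI; this is an illustration, not code generated from bionic):

extern int   __cxa_atexit(void (*dtor)(void *), void *arg, void *dso_handle);
extern void *__dso_handle;          /* the weak definition above, or the crtbegin_*.S one */

struct logger { int fd; };
static struct logger g_log;

static void logger_dtor(void *p) { (void) p; /* flush/close here */ }

/* Roughly what the compiler emits after constructing g_log: queue the
 * destructor against this DSO, so __cxa_finalize(&__dso_handle) runs it
 * on dlclose() or at process exit. */
static void construct_g_log(void) {
    g_log.fd = -1;
    __cxa_atexit(logger_dtor, &g_log, (void *) &__dso_handle);
}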


@ -3,6 +3,7 @@
/*
* Copyright (c) 1997 Mark Brinicombe
* Copyright (c) 2010 Android Open Source Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,6 +36,7 @@
#include <machine/asm.h>
#include <machine/setjmp.h>
#include <machine/cpu-features.h>
/*
* C library -- setjmp, longjmp
@ -57,24 +59,26 @@ ENTRY(setjmp)
ldmfd sp!, {r0, r14}
/* Store signal mask */
str r1, [r0, #(25 * 4)]
str r1, [r0, #(_JB_SIGMASK * 4)]
ldr r1, .Lsetjmp_magic
str r1, [r0], #4
str r1, [r0, #(_JB_MAGIC * 4)]
#ifdef SOFTFLOAT
add r0, r0, #52
#else
/* Store fp registers */
sfm f4, 4, [r0], #48
/* Store fpsr */
rfs r1
str r1, [r0], #0x0004
#endif /*SOFTFLOAT*/
/* Store integer registers */
stmia r0, {r4-r14}
mov r0, #0x00000000
bx lr
/* Store core registers */
add r1, r0, #(_JB_CORE_BASE * 4)
stmia r1, {r4-r14}
#ifdef __ARM_HAVE_VFP
/* Store floating-point registers */
add r1, r0, #(_JB_FLOAT_BASE * 4)
vstmia r1, {d8-d15}
/* Store floating-point state */
fmrx r1, fpscr
str r1, [r0, #(_JB_FLOAT_STATE * 4)]
#endif /* __ARM_HAVE_VFP */
mov r0, #0x00000000
bx lr
.Lsetjmp_magic:
.word _JB_MAGIC_SETJMP
@ -82,12 +86,12 @@ ENTRY(setjmp)
ENTRY(longjmp)
ldr r2, .Lsetjmp_magic
ldr r3, [r0]
ldr r3, [r0, #(_JB_MAGIC * 4)]
teq r2, r3
bne botch
/* Fetch signal mask */
ldr r2, [r0, #(25 * 4)]
ldr r2, [r0, #(_JB_SIGMASK * 4)]
/* Set signal mask */
stmfd sp!, {r0, r1, r14}
@ -99,18 +103,18 @@ ENTRY(longjmp)
add sp, sp, #4 /* unalign the stack */
ldmfd sp!, {r0, r1, r14}
add r0, r0, #4
#ifdef SOFTFLOAT
add r0, r0, #52
#else
/* Restore fp registers */
lfm f4, 4, [r0], #48
/* Restore FPSR */
ldr r4, [r0], #0x0004
wfs r4
#endif /* SOFTFLOAT */
/* Restore integer registers */
ldmia r0, {r4-r14}
#ifdef __ARM_HAVE_VFP
/* Restore floating-point registers */
add r2, r0, #(_JB_FLOAT_BASE * 4)
vldmia r2, {d8-d15}
/* Restore floating-point state */
ldr r2, [r0, #(_JB_FLOAT_STATE * 4)]
fmxr fpscr, r2
#endif /* __ARM_HAVE_VFP */
/* Restore core registers */
add r2, r0, #(_JB_CORE_BASE * 4)
ldmia r2, {r4-r14}
/* Validate sp and r14 */
teq sp, #0
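As a reminder of what the _JB_SIGMASK slot buys the full setjmp/longjmp pair over _setjmp/_longjmp, here is a hedged C sketch relying on the BSD-style behaviour implemented above (the mask is saved by setjmp and restored by longjmp):

#include <setjmp.h>
#include <signal.h>
#include <stddef.h>

static jmp_buf env;

void demo(void) {
    sigset_t block_int;
    sigemptyset(&block_int);
    sigaddset(&block_int, SIGINT);

    if (setjmp(env) == 0) {                        /* current mask stored in _JB_SIGMASK */
        sigprocmask(SIG_BLOCK, &block_int, NULL);  /* change the mask afterwards */
        longjmp(env, 1);
    }
    /* Back here, the mask saved by setjmp() is in effect again. */
}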


@ -170,6 +170,19 @@
# define __ARM_HAVE_LDREXD
#endif
/* define __ARM_HAVE_VFP if we have VFPv3
*/
#if __ARM_ARCH__ >= 7 && defined __VFP_FP__
# define __ARM_HAVE_VFP
#endif
/* define __ARM_HAVE_NEON for ARMv7 architecture if we support the
* Neon SIMD instruction set extensions. This also implies
* that VFPv3-D32 is supported.
*/
#if __ARM_ARCH__ >= 7 && defined __ARM_NEON__
# define __ARM_HAVE_NEON
#endif
/* Assembly-only macros */
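C and assembly sources can now key optional VFP/NEON paths off these macros instead of repeating raw architecture checks. A hedged sketch (the intrinsics path is purely illustrative):

#include <machine/cpu-features.h>
#ifdef __ARM_HAVE_NEON
#  include <arm_neon.h>
#endif

/* Multiply four floats by a scalar, using NEON when the build supports it. */
void scale4(float dst[4], const float src[4], float k) {
#ifdef __ARM_HAVE_NEON
    vst1q_f32(dst, vmulq_n_f32(vld1q_f32(src), k));
#else
    for (int i = 0; i < 4; ++i)
        dst[i] = src[i] * k;
#endif
}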


@ -1,87 +1,82 @@
/* $OpenBSD: setjmp.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
/* $NetBSD: setjmp.h,v 1.2 2001/08/25 14:45:59 bjh21 Exp $ */
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* machine/setjmp.h: machine dependent setjmp-related information.
*/
#ifdef __ELF__
#define _JBLEN 64 /* size, in longs, of a jmp_buf */
#else
#define _JBLEN 29 /* size, in longs, of a jmp_buf */
#endif
/*
* NOTE: The internal structure of a jmp_buf is *PRIVATE*
* This information is provided as there is software
* that fiddles with this with obtain the stack pointer
* (yes really ! and its commercial !).
/* _JBLEN is the size of a jmp_buf in longs.
* Do not modify this value or you will break the ABI !
*
* Description of the setjmp buffer
*
* word 0 magic number (dependant on creator)
* 1 - 3 f4 fp register 4
* 4 - 6 f5 fp register 5
* 7 - 9 f6 fp register 6
* 10 - 12 f7 fp register 7
* 13 fpsr fp status register
* 14 r4 register 4
* 15 r5 register 5
* 16 r6 register 6
* 17 r7 register 7
* 18 r8 register 8
* 19 r9 register 9
* 20 r10 register 10 (sl)
* 21 r11 register 11 (fp)
* 22 r12 register 12 (ip)
* 23 r13 register 13 (sp)
* 24 r14 register 14 (lr)
* 25 signal mask (dependant on magic)
* 26 (con't)
* 27 (con't)
* 28 (con't)
*
* The magic number number identifies the jmp_buf and
* how the buffer was created as well as providing
* a sanity check
*
* A side note I should mention - Please do not tamper
* with the floating point fields. While they are
* always saved and restored at the moment this cannot
* be garenteed especially if the compiler happens
* to be generating soft-float code so no fp
* registers will be used.
*
* Whilst this can be seen an encouraging people to
* use the setjmp buffer in this way I think that it
* is for the best then if changes occur compiles will
* break rather than just having new builds falling over
* mysteriously.
* This value comes from the original OpenBSD ARM-specific header
* that was replaced by this one.
*/
#define _JBLEN 64
/* According to the ARM AAPCS document, we only need to save
* the following registers:
*
* Core r4-r14
*
* VFP d8-d15 (see section 5.1.2.1)
*
* Registers s16-s31 (d8-d15, q4-q7) must be preserved across subroutine
* calls; registers s0-s15 (d0-d7, q0-q3) do not need to be preserved
* (and can be used for passing arguments or returning results in standard
* procedure-call variants). Registers d16-d31 (q8-q15), if present, do
* not need to be preserved.
*
* The FPSCR is saved because glibc saves it too.
*
*/
/* The internal structure of a jmp_buf is totally private.
* Current layout (may change in the future):
*
* word name description
* 0 magic magic number
* 1 sigmask signal mask (not used with _setjmp / _longjmp)
* 2 float_base base of float registers (d8 to d15)
* 18 float_state floating-point status and control register
* 19 core_base base of core registers (r4 to r14)
* 30 reserved reserved entries (room to grow)
* 64
*
* NOTE: float_base must be at an even word index, since the
* FP registers will be loaded/stored with instructions
* that expect 8-byte alignment.
*/
#define _JB_MAGIC 0
#define _JB_SIGMASK (_JB_MAGIC+1)
#define _JB_FLOAT_BASE (_JB_SIGMASK+1)
#define _JB_FLOAT_STATE (_JB_FLOAT_BASE + (15-8+1)*2)
#define _JB_CORE_BASE (_JB_FLOAT_STATE+1)
#define _JB_MAGIC__SETJMP 0x4278f500
#define _JB_MAGIC_SETJMP 0x4278f501
/* Valid for all jmp_buf's */
#define _JB_MAGIC 0
#define _JB_REG_F4 1
#define _JB_REG_F5 4
#define _JB_REG_F6 7
#define _JB_REG_F7 10
#define _JB_REG_FPSR 13
#define _JB_REG_R4 14
#define _JB_REG_R5 15
#define _JB_REG_R6 16
#define _JB_REG_R7 17
#define _JB_REG_R8 18
#define _JB_REG_R9 19
#define _JB_REG_R10 20
#define _JB_REG_R11 21
#define _JB_REG_R12 22
#define _JB_REG_R13 23
#define _JB_REG_R14 24
/* Only valid with the _JB_MAGIC_SETJMP magic */
#define _JB_SIGMASK 25
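As a sanity check on the new layout, the offsets work out as follows; the compile-time asserts below are a hedged sketch, not part of the header:

#include <machine/setjmp.h>

/* d8-d15 occupy (15 - 8 + 1) * 2 = 16 words, so:
 *   _JB_FLOAT_BASE  = 2
 *   _JB_FLOAT_STATE = 2 + 16 = 18
 *   _JB_CORE_BASE   = 19, and r4-r14 end at word 29,
 * leaving words 30..63 reserved inside _JBLEN = 64. */
typedef char float_base_is_8_byte_aligned[(_JB_FLOAT_BASE % 2 == 0) ? 1 : -1];
typedef char layout_fits_in_jmp_buf[(_JB_CORE_BASE + 11 <= _JBLEN) ? 1 : -1];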


@ -74,7 +74,7 @@ syscall_src += arch-arm/syscalls/fsync.S
syscall_src += arch-arm/syscalls/fchown.S
syscall_src += arch-arm/syscalls/sync.S
syscall_src += arch-arm/syscalls/__fcntl64.S
syscall_src += arch-arm/syscalls/fstatfs.S
syscall_src += arch-arm/syscalls/__fstatfs64.S
syscall_src += arch-arm/syscalls/sendfile.S
syscall_src += arch-arm/syscalls/fstatat.S
syscall_src += arch-arm/syscalls/mkdirat.S
@ -161,6 +161,7 @@ syscall_src += arch-arm/syscalls/__syslog.S
syscall_src += arch-arm/syscalls/init_module.S
syscall_src += arch-arm/syscalls/delete_module.S
syscall_src += arch-arm/syscalls/klogctl.S
syscall_src += arch-arm/syscalls/sysinfo.S
syscall_src += arch-arm/syscalls/futex.S
syscall_src += arch-arm/syscalls/epoll_create.S
syscall_src += arch-arm/syscalls/epoll_ctl.S


@ -2,12 +2,12 @@
#include <sys/linux-syscalls.h>
.text
.type fstatfs, #function
.globl fstatfs
.type __fstatfs64, #function
.globl __fstatfs64
.align 4
.fnstart
fstatfs:
__fstatfs64:
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =__NR_fstatfs64


@ -0,0 +1,19 @@
/* autogenerated by gensyscalls.py */
#include <sys/linux-syscalls.h>
.text
.type sysinfo, #function
.globl sysinfo
.align 4
.fnstart
sysinfo:
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =__NR_sysinfo
swi #0
ldmfd sp!, {r4, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
.fnend
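A hedged usage sketch for the new sysinfo() stub (the <sys/sysinfo.h> header name and the fields used are assumptions; struct sysinfo ultimately comes from the kernel headers):

#include <sys/sysinfo.h>
#include <stdio.h>

int main(void) {
    struct sysinfo si;
    if (sysinfo(&si) != 0)
        return 1;
    printf("uptime: %ld s, free RAM: %llu bytes\n",
           si.uptime, (unsigned long long) si.freeram * si.mem_unit);
    return 0;
}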


@ -1,5 +1,5 @@
/*
* Copyright (C) 2009 The Android Open Source Project
* Copyright (C) 2009-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,7 +31,7 @@
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# any executable that is dynamically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
@ -93,3 +93,5 @@ __FINI_ARRAY__:
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -1,5 +1,5 @@
/*
* Copyright (C) 2009 The Android Open Source Project
* Copyright (C) 2009-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -68,12 +68,6 @@ _start:
.long __FINI_ARRAY__
.long __CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
@ -94,3 +88,4 @@ __FINI_ARRAY__:
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -77,7 +77,7 @@ syscall_src += arch-sh/syscalls/fsync.S
syscall_src += arch-sh/syscalls/fchown.S
syscall_src += arch-sh/syscalls/sync.S
syscall_src += arch-sh/syscalls/__fcntl64.S
syscall_src += arch-sh/syscalls/fstatfs.S
syscall_src += arch-sh/syscalls/__fstatfs64.S
syscall_src += arch-sh/syscalls/sendfile.S
syscall_src += arch-sh/syscalls/fstatat.S
syscall_src += arch-sh/syscalls/mkdirat.S
@ -150,6 +150,7 @@ syscall_src += arch-sh/syscalls/__syslog.S
syscall_src += arch-sh/syscalls/init_module.S
syscall_src += arch-sh/syscalls/delete_module.S
syscall_src += arch-sh/syscalls/klogctl.S
syscall_src += arch-sh/syscalls/sysinfo.S
syscall_src += arch-sh/syscalls/futex.S
syscall_src += arch-sh/syscalls/epoll_create.S
syscall_src += arch-sh/syscalls/epoll_ctl.S


@ -2,11 +2,11 @@
#include <sys/linux-syscalls.h>
.text
.type fstatfs, @function
.globl fstatfs
.type __fstatfs64, @function
.globl __fstatfs64
.align 4
fstatfs:
__fstatfs64:
/* invoke trap */
mov.l 0f, r3 /* trap num */


@ -0,0 +1,32 @@
/* autogenerated by gensyscalls.py */
#include <sys/linux-syscalls.h>
.text
.type sysinfo, @function
.globl sysinfo
.align 4
sysinfo:
/* invoke trap */
mov.l 0f, r3 /* trap num */
trapa #(1 + 0x10)
/* check return value */
cmp/pz r0
bt __NR_sysinfo_end
/* keep error number */
sts.l pr, @-r15
mov.l 1f, r1
jsr @r1
mov r0, r4
lds.l @r15+, pr
__NR_sysinfo_end:
rts
nop
.align 2
0: .long __NR_sysinfo
1: .long __set_syscall_errno


@ -30,7 +30,7 @@
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# any executable that is dynamically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
@ -94,3 +94,4 @@ __FINI_ARRAY__:
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -1,3 +1,10 @@
# This function is to be called when the shared library
# is unloaded through dlclose()
_on_dlclose:
lea __dso_handle, %eax
call __cxa_finalize
ret
/* we put the _init() function here in case the user files for the shared
* libs want to drop things into .init section.
* We then will call our ctors from crtend_so.o */
@ -20,6 +27,7 @@ __INIT_ARRAY__:
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.long _on_dlclose
.section .ctors, "aw"
.align 4
@ -27,3 +35,5 @@ __FINI_ARRAY__:
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -67,12 +67,6 @@ _start:
.long __FINI_ARRAY__
.long __CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
@ -93,3 +87,4 @@ __FINI_ARRAY__:
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"


@ -36,10 +36,23 @@
#define _I386__TYPES_H_
/* the kernel defines size_t as unsigned int, but g++ wants it to be unsigned long */
#define _SIZE_T
#ifndef _SIZE_T
# define _SIZE_T
# ifdef ANDROID
typedef unsigned int size_t;
# else
typedef unsigned long size_t;
# endif
#endif
#if !defined(_SSIZE_T) && !defined(_SSIZE_T_DEFINED_)
#define _SSIZE_T
#define _SSIZE_T_DEFINED_
typedef long int ssize_t;
#endif
#ifndef _PTRDIFF_T
#define _PTRDIFF_T
typedef unsigned int size_t;
typedef int ptrdiff_t;
typedef long ptrdiff_t;
#endif
#define _OFF_T_DEFINED_
#define _SIZE_T_DEFINED_


@ -0,0 +1,45 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# include "cache_wrapper.S"
# undef __i686
# define MEMCPY bcopy
# define USE_AS_MEMMOVE
# define USE_AS_BCOPY
# include "ssse3-memcpy5.S"
#else
# include "bcopy.S"
#endif


@ -0,0 +1,43 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSE2)
# include "cache_wrapper.S"
# undef __i686
# define USE_AS_BZERO
# define sse2_memset5_atom bzero
# include "sse2-memset5-atom.S"
#else
# include "bzero.S"
#endif


@ -0,0 +1,35 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Values are optimized for Atom */
#define SHARED_CACHE_SIZE (512*1024) /* Atom L2 Cache */
#define DATA_CACHE_SIZE (24*1024) /* Atom L1 Data Cache */
#define SHARED_CACHE_SIZE_HALF (SHARED_CACHE_SIZE / 2)
#define DATA_CACHE_SIZE_HALF (DATA_CACHE_SIZE / 2)


@ -0,0 +1,40 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# define MEMCMP memcmp
# include "ssse3-memcmp3.S"
#else
# include "memcmp.S"
#endif


@ -0,0 +1,43 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# include "cache_wrapper.S"
# undef __i686
# define MEMCPY memcpy
# define USE_AS_MEMMOVE
# include "ssse3-memcpy5.S"
#else
# include "memcpy.S"
#endif


@ -0,0 +1,43 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# include "cache_wrapper.S"
# undef __i686
# define MEMCPY memmove
# define USE_AS_MEMMOVE
# include "ssse3-memcpy5.S"
#else
# include "memmove.S"
#endif


@ -0,0 +1,42 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSE2)
# include "cache_wrapper.S"
# undef __i686
# define sse2_memset5_atom memset
# include "sse2-memset5-atom.S"
#else
# include "memset.S"
#endif


@ -0,0 +1,907 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef L
# define L(label) .L##label
#endif
#ifndef ALIGN
# define ALIGN(n) .p2align n
#endif
#ifndef cfi_startproc
# define cfi_startproc .cfi_startproc
#endif
#ifndef cfi_endproc
# define cfi_endproc .cfi_endproc
#endif
#ifndef cfi_rel_offset
# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
#endif
#ifndef cfi_restore
# define cfi_restore(reg) .cfi_restore (reg)
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
.globl name; \
.p2align 4; \
name: \
cfi_startproc
#endif
#ifndef END
# define END(name) \
cfi_endproc; \
.size name, .-name
#endif
#define CFI_PUSH(REG) \
cfi_adjust_cfa_offset (4); \
cfi_rel_offset (REG, 0)
#define CFI_POP(REG) \
cfi_adjust_cfa_offset (-4); \
cfi_restore (REG)
#define PUSH(REG) pushl REG; CFI_PUSH (REG)
#define POP(REG) popl REG; CFI_POP (REG)
#ifdef USE_AS_BZERO
# define DEST PARMS
# define LEN DEST+4
# define SETRTNVAL
#else
# define DEST PARMS
# define CHR DEST+4
# define LEN CHR+4
# define SETRTNVAL movl DEST(%esp), %eax
#endif
#ifdef SHARED
# define ENTRANCE PUSH (%ebx);
# define RETURN_END POP (%ebx); ret
# define RETURN RETURN_END; CFI_PUSH (%ebx)
# define PARMS 8 /* Preserve EBX. */
# define JMPTBL(I, B) I - B
/* Load an entry in a jump table into EBX and branch to it. TABLE is a
jump table with relative offsets. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE) \
/* We first load PC into EBX. */ \
call __i686.get_pc_thunk.bx; \
/* Get the address of the jump table. */ \
add $(TABLE - .), %ebx; \
/* Get the entry and convert the relative offset to the \
absolute address. */ \
add (%ebx,%ecx,4), %ebx; \
add %ecx, %edx; \
/* We loaded the jump table and adjusted EDX. Go. */ \
jmp *%ebx
.section .gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
.globl __i686.get_pc_thunk.bx
.hidden __i686.get_pc_thunk.bx
ALIGN (4)
.type __i686.get_pc_thunk.bx,@function
__i686.get_pc_thunk.bx:
movl (%esp), %ebx
ret
#else
# define ENTRANCE
# define RETURN_END ret
# define RETURN RETURN_END
# define PARMS 4
# define JMPTBL(I, B) I
/* Branch to an entry in a jump table. TABLE is a jump table with
absolute offsets. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE) \
add %ecx, %edx; \
jmp *TABLE(,%ecx,4)
#endif
.section .text.sse2,"ax",@progbits
ALIGN (4)
ENTRY (sse2_memset5_atom)
ENTRANCE
movl LEN(%esp), %ecx
#ifdef USE_AS_BZERO
xor %eax, %eax
#else
movzbl CHR(%esp), %eax
movb %al, %ah
/* Fill the whole EAX with pattern. */
movl %eax, %edx
shl $16, %eax
or %edx, %eax
#endif
movl DEST(%esp), %edx
cmp $32, %ecx
jae L(32bytesormore)
L(write_less32bytes):
BRANCH_TO_JMPTBL_ENTRY (L(table_less_32bytes))
.pushsection .rodata.sse2,"a",@progbits
ALIGN (2)
L(table_less_32bytes):
.int JMPTBL (L(write_0bytes), L(table_less_32bytes))
.int JMPTBL (L(write_1bytes), L(table_less_32bytes))
.int JMPTBL (L(write_2bytes), L(table_less_32bytes))
.int JMPTBL (L(write_3bytes), L(table_less_32bytes))
.int JMPTBL (L(write_4bytes), L(table_less_32bytes))
.int JMPTBL (L(write_5bytes), L(table_less_32bytes))
.int JMPTBL (L(write_6bytes), L(table_less_32bytes))
.int JMPTBL (L(write_7bytes), L(table_less_32bytes))
.int JMPTBL (L(write_8bytes), L(table_less_32bytes))
.int JMPTBL (L(write_9bytes), L(table_less_32bytes))
.int JMPTBL (L(write_10bytes), L(table_less_32bytes))
.int JMPTBL (L(write_11bytes), L(table_less_32bytes))
.int JMPTBL (L(write_12bytes), L(table_less_32bytes))
.int JMPTBL (L(write_13bytes), L(table_less_32bytes))
.int JMPTBL (L(write_14bytes), L(table_less_32bytes))
.int JMPTBL (L(write_15bytes), L(table_less_32bytes))
.int JMPTBL (L(write_16bytes), L(table_less_32bytes))
.int JMPTBL (L(write_17bytes), L(table_less_32bytes))
.int JMPTBL (L(write_18bytes), L(table_less_32bytes))
.int JMPTBL (L(write_19bytes), L(table_less_32bytes))
.int JMPTBL (L(write_20bytes), L(table_less_32bytes))
.int JMPTBL (L(write_21bytes), L(table_less_32bytes))
.int JMPTBL (L(write_22bytes), L(table_less_32bytes))
.int JMPTBL (L(write_23bytes), L(table_less_32bytes))
.int JMPTBL (L(write_24bytes), L(table_less_32bytes))
.int JMPTBL (L(write_25bytes), L(table_less_32bytes))
.int JMPTBL (L(write_26bytes), L(table_less_32bytes))
.int JMPTBL (L(write_27bytes), L(table_less_32bytes))
.int JMPTBL (L(write_28bytes), L(table_less_32bytes))
.int JMPTBL (L(write_29bytes), L(table_less_32bytes))
.int JMPTBL (L(write_30bytes), L(table_less_32bytes))
.int JMPTBL (L(write_31bytes), L(table_less_32bytes))
.popsection
ALIGN (4)
L(write_28bytes):
movl %eax, -28(%edx)
L(write_24bytes):
movl %eax, -24(%edx)
L(write_20bytes):
movl %eax, -20(%edx)
L(write_16bytes):
movl %eax, -16(%edx)
L(write_12bytes):
movl %eax, -12(%edx)
L(write_8bytes):
movl %eax, -8(%edx)
L(write_4bytes):
movl %eax, -4(%edx)
L(write_0bytes):
SETRTNVAL
RETURN
ALIGN (4)
L(write_29bytes):
movl %eax, -29(%edx)
L(write_25bytes):
movl %eax, -25(%edx)
L(write_21bytes):
movl %eax, -21(%edx)
L(write_17bytes):
movl %eax, -17(%edx)
L(write_13bytes):
movl %eax, -13(%edx)
L(write_9bytes):
movl %eax, -9(%edx)
L(write_5bytes):
movl %eax, -5(%edx)
L(write_1bytes):
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(write_30bytes):
movl %eax, -30(%edx)
L(write_26bytes):
movl %eax, -26(%edx)
L(write_22bytes):
movl %eax, -22(%edx)
L(write_18bytes):
movl %eax, -18(%edx)
L(write_14bytes):
movl %eax, -14(%edx)
L(write_10bytes):
movl %eax, -10(%edx)
L(write_6bytes):
movl %eax, -6(%edx)
L(write_2bytes):
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(write_31bytes):
movl %eax, -31(%edx)
L(write_27bytes):
movl %eax, -27(%edx)
L(write_23bytes):
movl %eax, -23(%edx)
L(write_19bytes):
movl %eax, -19(%edx)
L(write_15bytes):
movl %eax, -15(%edx)
L(write_11bytes):
movl %eax, -11(%edx)
L(write_7bytes):
movl %eax, -7(%edx)
L(write_3bytes):
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* ECX > 32 and EDX is 4 byte aligned. */
L(32bytesormore):
/* Fill xmm0 with the pattern. */
#ifdef USE_AS_BZERO
pxor %xmm0, %xmm0
#else
movd %eax, %xmm0
punpcklbw %xmm0, %xmm0
pshufd $0, %xmm0, %xmm0
#endif
testl $0xf, %edx
jz L(aligned_16)
/* ECX > 32 and EDX is not 16 byte aligned. */
L(not_aligned_16):
movdqu %xmm0, (%edx)
movl %edx, %eax
and $-16, %edx
add $16, %edx
sub %edx, %eax
add %eax, %ecx
movd %xmm0, %eax
ALIGN (4)
L(aligned_16):
cmp $128, %ecx
jae L(128bytesormore)
L(aligned_16_less128bytes):
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
ALIGN (4)
L(128bytesormore):
#ifdef SHARED_CACHE_SIZE
PUSH (%ebx)
mov $SHARED_CACHE_SIZE, %ebx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
mov __x86_shared_cache_size@GOTOFF(%ebx), %ebx
# else
PUSH (%ebx)
mov __x86_shared_cache_size, %ebx
# endif
#endif
cmp %ebx, %ecx
jae L(128bytesormore_nt_start)
#ifdef DATA_CACHE_SIZE
POP (%ebx)
cmp $DATA_CACHE_SIZE, %ecx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
cmp __x86_data_cache_size@GOTOFF(%ebx), %ecx
# else
POP (%ebx)
cmp __x86_data_cache_size, %ecx
# endif
#endif
jae L(128bytes_L2_normal)
subl $128, %ecx
L(128bytesormore_normal):
sub $128, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
lea 128(%edx), %edx
jb L(128bytesless_normal)
sub $128, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
lea 128(%edx), %edx
jae L(128bytesormore_normal)
L(128bytesless_normal):
lea 128(%ecx), %ecx
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
ALIGN (4)
L(128bytes_L2_normal):
prefetcht0 0x380(%edx)
prefetcht0 0x3c0(%edx)
sub $128, %ecx
movdqa %xmm0, (%edx)
movaps %xmm0, 0x10(%edx)
movaps %xmm0, 0x20(%edx)
movaps %xmm0, 0x30(%edx)
movaps %xmm0, 0x40(%edx)
movaps %xmm0, 0x50(%edx)
movaps %xmm0, 0x60(%edx)
movaps %xmm0, 0x70(%edx)
add $128, %edx
cmp $128, %ecx
jae L(128bytes_L2_normal)
L(128bytesless_L2_normal):
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
L(128bytesormore_nt_start):
sub %ebx, %ecx
ALIGN (4)
L(128bytesormore_shared_cache_loop):
prefetcht0 0x3c0(%edx)
prefetcht0 0x380(%edx)
sub $0x80, %ebx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
add $0x80, %edx
cmp $0x80, %ebx
jae L(128bytesormore_shared_cache_loop)
cmp $0x80, %ecx
jb L(shared_cache_loop_end)
ALIGN (4)
L(128bytesormore_nt):
sub $0x80, %ecx
movntdq %xmm0, (%edx)
movntdq %xmm0, 0x10(%edx)
movntdq %xmm0, 0x20(%edx)
movntdq %xmm0, 0x30(%edx)
movntdq %xmm0, 0x40(%edx)
movntdq %xmm0, 0x50(%edx)
movntdq %xmm0, 0x60(%edx)
movntdq %xmm0, 0x70(%edx)
add $0x80, %edx
cmp $0x80, %ecx
jae L(128bytesormore_nt)
sfence
L(shared_cache_loop_end):
#if defined DATA_CACHE_SIZE || !defined SHARED
POP (%ebx)
#endif
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
.pushsection .rodata.sse2,"a",@progbits
ALIGN (2)
L(table_16_128bytes):
.int JMPTBL (L(aligned_16_0bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_1bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_2bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_3bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_4bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_5bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_6bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_7bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_8bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_9bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_10bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_11bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_12bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_13bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_14bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_15bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_16bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_17bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_18bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_19bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_20bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_21bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_22bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_23bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_24bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_25bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_26bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_27bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_28bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_29bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_30bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_31bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_32bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_33bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_34bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_35bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_36bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_37bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_38bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_39bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_40bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_41bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_42bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_43bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_44bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_45bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_46bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_47bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_48bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_49bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_50bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_51bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_52bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_53bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_54bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_55bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_56bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_57bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_58bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_59bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_60bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_61bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_62bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_63bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_64bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_65bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_66bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_67bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_68bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_69bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_70bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_71bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_72bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_73bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_74bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_75bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_76bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_77bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_78bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_79bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_80bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_81bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_82bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_83bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_84bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_85bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_86bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_87bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_88bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_89bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_90bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_91bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_92bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_93bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_94bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_95bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_96bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_97bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_98bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_99bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_100bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_101bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_102bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_103bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_104bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_105bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_106bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_107bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_108bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_109bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_110bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_111bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_112bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_113bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_114bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_115bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_116bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_117bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_118bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_119bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_120bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_121bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_122bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_123bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_124bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_125bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_126bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_127bytes), L(table_16_128bytes))
.popsection
ALIGN (4)
L(aligned_16_112bytes):
movdqa %xmm0, -112(%edx)
L(aligned_16_96bytes):
movdqa %xmm0, -96(%edx)
L(aligned_16_80bytes):
movdqa %xmm0, -80(%edx)
L(aligned_16_64bytes):
movdqa %xmm0, -64(%edx)
L(aligned_16_48bytes):
movdqa %xmm0, -48(%edx)
L(aligned_16_32bytes):
movdqa %xmm0, -32(%edx)
L(aligned_16_16bytes):
movdqa %xmm0, -16(%edx)
L(aligned_16_0bytes):
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_113bytes):
movdqa %xmm0, -113(%edx)
L(aligned_16_97bytes):
movdqa %xmm0, -97(%edx)
L(aligned_16_81bytes):
movdqa %xmm0, -81(%edx)
L(aligned_16_65bytes):
movdqa %xmm0, -65(%edx)
L(aligned_16_49bytes):
movdqa %xmm0, -49(%edx)
L(aligned_16_33bytes):
movdqa %xmm0, -33(%edx)
L(aligned_16_17bytes):
movdqa %xmm0, -17(%edx)
L(aligned_16_1bytes):
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_114bytes):
movdqa %xmm0, -114(%edx)
L(aligned_16_98bytes):
movdqa %xmm0, -98(%edx)
L(aligned_16_82bytes):
movdqa %xmm0, -82(%edx)
L(aligned_16_66bytes):
movdqa %xmm0, -66(%edx)
L(aligned_16_50bytes):
movdqa %xmm0, -50(%edx)
L(aligned_16_34bytes):
movdqa %xmm0, -34(%edx)
L(aligned_16_18bytes):
movdqa %xmm0, -18(%edx)
L(aligned_16_2bytes):
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_115bytes):
movdqa %xmm0, -115(%edx)
L(aligned_16_99bytes):
movdqa %xmm0, -99(%edx)
L(aligned_16_83bytes):
movdqa %xmm0, -83(%edx)
L(aligned_16_67bytes):
movdqa %xmm0, -67(%edx)
L(aligned_16_51bytes):
movdqa %xmm0, -51(%edx)
L(aligned_16_35bytes):
movdqa %xmm0, -35(%edx)
L(aligned_16_19bytes):
movdqa %xmm0, -19(%edx)
L(aligned_16_3bytes):
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_116bytes):
movdqa %xmm0, -116(%edx)
L(aligned_16_100bytes):
movdqa %xmm0, -100(%edx)
L(aligned_16_84bytes):
movdqa %xmm0, -84(%edx)
L(aligned_16_68bytes):
movdqa %xmm0, -68(%edx)
L(aligned_16_52bytes):
movdqa %xmm0, -52(%edx)
L(aligned_16_36bytes):
movdqa %xmm0, -36(%edx)
L(aligned_16_20bytes):
movdqa %xmm0, -20(%edx)
L(aligned_16_4bytes):
movl %eax, -4(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_117bytes):
movdqa %xmm0, -117(%edx)
L(aligned_16_101bytes):
movdqa %xmm0, -101(%edx)
L(aligned_16_85bytes):
movdqa %xmm0, -85(%edx)
L(aligned_16_69bytes):
movdqa %xmm0, -69(%edx)
L(aligned_16_53bytes):
movdqa %xmm0, -53(%edx)
L(aligned_16_37bytes):
movdqa %xmm0, -37(%edx)
L(aligned_16_21bytes):
movdqa %xmm0, -21(%edx)
L(aligned_16_5bytes):
movl %eax, -5(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_118bytes):
movdqa %xmm0, -118(%edx)
L(aligned_16_102bytes):
movdqa %xmm0, -102(%edx)
L(aligned_16_86bytes):
movdqa %xmm0, -86(%edx)
L(aligned_16_70bytes):
movdqa %xmm0, -70(%edx)
L(aligned_16_54bytes):
movdqa %xmm0, -54(%edx)
L(aligned_16_38bytes):
movdqa %xmm0, -38(%edx)
L(aligned_16_22bytes):
movdqa %xmm0, -22(%edx)
L(aligned_16_6bytes):
movl %eax, -6(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_119bytes):
movdqa %xmm0, -119(%edx)
L(aligned_16_103bytes):
movdqa %xmm0, -103(%edx)
L(aligned_16_87bytes):
movdqa %xmm0, -87(%edx)
L(aligned_16_71bytes):
movdqa %xmm0, -71(%edx)
L(aligned_16_55bytes):
movdqa %xmm0, -55(%edx)
L(aligned_16_39bytes):
movdqa %xmm0, -39(%edx)
L(aligned_16_23bytes):
movdqa %xmm0, -23(%edx)
L(aligned_16_7bytes):
movl %eax, -7(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_120bytes):
movdqa %xmm0, -120(%edx)
L(aligned_16_104bytes):
movdqa %xmm0, -104(%edx)
L(aligned_16_88bytes):
movdqa %xmm0, -88(%edx)
L(aligned_16_72bytes):
movdqa %xmm0, -72(%edx)
L(aligned_16_56bytes):
movdqa %xmm0, -56(%edx)
L(aligned_16_40bytes):
movdqa %xmm0, -40(%edx)
L(aligned_16_24bytes):
movdqa %xmm0, -24(%edx)
L(aligned_16_8bytes):
movq %xmm0, -8(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_121bytes):
movdqa %xmm0, -121(%edx)
L(aligned_16_105bytes):
movdqa %xmm0, -105(%edx)
L(aligned_16_89bytes):
movdqa %xmm0, -89(%edx)
L(aligned_16_73bytes):
movdqa %xmm0, -73(%edx)
L(aligned_16_57bytes):
movdqa %xmm0, -57(%edx)
L(aligned_16_41bytes):
movdqa %xmm0, -41(%edx)
L(aligned_16_25bytes):
movdqa %xmm0, -25(%edx)
L(aligned_16_9bytes):
movq %xmm0, -9(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_122bytes):
movdqa %xmm0, -122(%edx)
L(aligned_16_106bytes):
movdqa %xmm0, -106(%edx)
L(aligned_16_90bytes):
movdqa %xmm0, -90(%edx)
L(aligned_16_74bytes):
movdqa %xmm0, -74(%edx)
L(aligned_16_58bytes):
movdqa %xmm0, -58(%edx)
L(aligned_16_42bytes):
movdqa %xmm0, -42(%edx)
L(aligned_16_26bytes):
movdqa %xmm0, -26(%edx)
L(aligned_16_10bytes):
movq %xmm0, -10(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_123bytes):
movdqa %xmm0, -123(%edx)
L(aligned_16_107bytes):
movdqa %xmm0, -107(%edx)
L(aligned_16_91bytes):
movdqa %xmm0, -91(%edx)
L(aligned_16_75bytes):
movdqa %xmm0, -75(%edx)
L(aligned_16_59bytes):
movdqa %xmm0, -59(%edx)
L(aligned_16_43bytes):
movdqa %xmm0, -43(%edx)
L(aligned_16_27bytes):
movdqa %xmm0, -27(%edx)
L(aligned_16_11bytes):
movq %xmm0, -11(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_124bytes):
movdqa %xmm0, -124(%edx)
L(aligned_16_108bytes):
movdqa %xmm0, -108(%edx)
L(aligned_16_92bytes):
movdqa %xmm0, -92(%edx)
L(aligned_16_76bytes):
movdqa %xmm0, -76(%edx)
L(aligned_16_60bytes):
movdqa %xmm0, -60(%edx)
L(aligned_16_44bytes):
movdqa %xmm0, -44(%edx)
L(aligned_16_28bytes):
movdqa %xmm0, -28(%edx)
L(aligned_16_12bytes):
movq %xmm0, -12(%edx)
movl %eax, -4(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_125bytes):
movdqa %xmm0, -125(%edx)
L(aligned_16_109bytes):
movdqa %xmm0, -109(%edx)
L(aligned_16_93bytes):
movdqa %xmm0, -93(%edx)
L(aligned_16_77bytes):
movdqa %xmm0, -77(%edx)
L(aligned_16_61bytes):
movdqa %xmm0, -61(%edx)
L(aligned_16_45bytes):
movdqa %xmm0, -45(%edx)
L(aligned_16_29bytes):
movdqa %xmm0, -29(%edx)
L(aligned_16_13bytes):
movq %xmm0, -13(%edx)
movl %eax, -5(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_126bytes):
movdqa %xmm0, -126(%edx)
L(aligned_16_110bytes):
movdqa %xmm0, -110(%edx)
L(aligned_16_94bytes):
movdqa %xmm0, -94(%edx)
L(aligned_16_78bytes):
movdqa %xmm0, -78(%edx)
L(aligned_16_62bytes):
movdqa %xmm0, -62(%edx)
L(aligned_16_46bytes):
movdqa %xmm0, -46(%edx)
L(aligned_16_30bytes):
movdqa %xmm0, -30(%edx)
L(aligned_16_14bytes):
movq %xmm0, -14(%edx)
movl %eax, -6(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(aligned_16_127bytes):
movdqa %xmm0, -127(%edx)
L(aligned_16_111bytes):
movdqa %xmm0, -111(%edx)
L(aligned_16_95bytes):
movdqa %xmm0, -95(%edx)
L(aligned_16_79bytes):
movdqa %xmm0, -79(%edx)
L(aligned_16_63bytes):
movdqa %xmm0, -63(%edx)
L(aligned_16_47bytes):
movdqa %xmm0, -47(%edx)
L(aligned_16_31bytes):
movdqa %xmm0, -31(%edx)
L(aligned_16_15bytes):
movq %xmm0, -15(%edx)
movl %eax, -7(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN_END
END (sse2_memset5_atom)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,40 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# define ssse3_strcmp_latest strcmp
# include "ssse3-strcmp.S"
#else
# include "strcmp.S"
#endif


@ -0,0 +1,42 @@
/*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# define USE_AS_STRNCMP
# define ssse3_strcmp_latest strncmp
# include "ssse3-strcmp.S"
#else
# include "strncmp.S"
#endif


@ -77,7 +77,7 @@ syscall_src += arch-x86/syscalls/fsync.S
syscall_src += arch-x86/syscalls/fchown.S
syscall_src += arch-x86/syscalls/sync.S
syscall_src += arch-x86/syscalls/__fcntl64.S
syscall_src += arch-x86/syscalls/fstatfs.S
syscall_src += arch-x86/syscalls/__fstatfs64.S
syscall_src += arch-x86/syscalls/sendfile.S
syscall_src += arch-x86/syscalls/fstatat.S
syscall_src += arch-x86/syscalls/mkdirat.S
@ -164,6 +164,7 @@ syscall_src += arch-x86/syscalls/__syslog.S
syscall_src += arch-x86/syscalls/init_module.S
syscall_src += arch-x86/syscalls/delete_module.S
syscall_src += arch-x86/syscalls/klogctl.S
syscall_src += arch-x86/syscalls/sysinfo.S
syscall_src += arch-x86/syscalls/futex.S
syscall_src += arch-x86/syscalls/epoll_create.S
syscall_src += arch-x86/syscalls/epoll_ctl.S


@ -2,11 +2,11 @@
#include <sys/linux-syscalls.h>
.text
.type fstatfs, @function
.globl fstatfs
.type __fstatfs64, @function
.globl __fstatfs64
.align 4
fstatfs:
__fstatfs64:
pushl %ebx
pushl %ecx
pushl %edx


@ -0,0 +1,23 @@
/* autogenerated by gensyscalls.py */
#include <sys/linux-syscalls.h>
.text
.type sysinfo, @function
.globl sysinfo
.align 4
sysinfo:
pushl %ebx
mov 8(%esp), %ebx
movl $__NR_sysinfo, %eax
int $0x80
cmpl $-129, %eax
jb 1f
negl %eax
pushl %eax
call __set_errno
addl $4, %esp
orl $-1, %eax
1:
popl %ebx
ret
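The stub above only traps into the kernel; a caller would use it roughly like this (an illustrative sketch, not part of the change, assuming the usual Linux struct sysinfo layout from <sys/sysinfo.h>):
#include <sys/sysinfo.h>
#include <stdio.h>
int main(void)
{
    struct sysinfo si;
    /* on failure the stub above sets errno and returns -1 */
    if (sysinfo(&si) != 0)
        return 1;
    printf("uptime %ld s, free ram %lu units of %u bytes, %u processes\n",
           (long)si.uptime, (unsigned long)si.freeram,
           (unsigned)si.mem_unit, (unsigned)si.procs);
    return 0;
}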


@ -26,14 +26,17 @@
* SUCH DAMAGE.
*/
#include <stddef.h>
extern char** environ;
int clearenv(void)
{
char **P = environ;
int offset;
char **P = environ;
for (P = &environ[offset]; *P; ++P)
*P = 0;
return 0;
if (P != NULL) {
for (; *P; ++P)
*P = NULL;
}
return 0;
}


@ -29,7 +29,7 @@
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
//#include <sys/types.h>
#include "cpuacct.h"
int cpuacct_add(uid_t uid)
{


@ -27,6 +27,7 @@
*/
#include <unistd.h>
#include "pthread_internal.h"
#include "cpuacct.h"
extern int __fork(void);


@ -548,9 +548,9 @@ fts_build(FTS *sp, int type)
DIR *dirp;
void *oldaddr;
size_t len, maxlen;
int nitems, cderrno, descend, level, nlinks, nostat, doadjust;
int nitems, cderrno, descend, level, nlinks, nostat = 0, doadjust;
int saved_errno;
char *cp;
char *cp = NULL;
/* Set current node pointer. */
cur = sp->fts_cur;


@ -38,12 +38,17 @@
#include <stdarg.h>
#include <fcntl.h>
#include <cutils/logger.h>
#include "logd.h"
/* should match system/core/include/cutils/logger.h */
#define LOGGER_LOG_MAIN "log/main"
#define LOGGER_LOG_RADIO "log/radio"
#define LOGGER_LOG_EVENTS "log/events"
#define LOGGER_LOG_SYSTEM "log/system"
#include <pthread.h>
#define LOG_BUF_SIZE 1024
#define LOG_BUF_SIZE 1024
typedef enum {
LOG_ID_NONE = 0,
@ -114,6 +119,8 @@ static int __write_to_log_init(log_id_t log_id, struct iovec *vec)
(fd < 0) ? __write_to_log_null : __write_to_log_kernel;
log_channels[log_id].fd = fd;
log_channels[log_id].fd = fd;
pthread_mutex_unlock(&log_init_lock);
return log_channels[log_id].logger(log_id, vec);


@ -60,34 +60,43 @@ HashTable gHashTable;
static int hash_entry_compare(const void* arg1, const void* arg2)
{
int result;
HashEntry* e1 = *(HashEntry**)arg1;
HashEntry* e2 = *(HashEntry**)arg2;
size_t nbAlloc1 = e1->allocations;
size_t nbAlloc2 = e2->allocations;
size_t size1 = e1->size & ~SIZE_FLAG_MASK;
size_t size2 = e2->size & ~SIZE_FLAG_MASK;
size_t alloc1 = nbAlloc1 * size1;
size_t alloc2 = nbAlloc2 * size2;
// sort in descending order by:
// 1) total size
// 2) number of allocations
//
// This is used for sorting, not determination of equality, so we don't
// need to compare the bit flags.
int result;
if (alloc1 > alloc2) {
// if one or both arg pointers are null, deal gracefully
if (e1 == NULL) {
result = (e2 == NULL) ? 0 : 1;
} else if (e2 == NULL) {
result = -1;
} else if (alloc1 < alloc2) {
result = 1;
} else {
if (nbAlloc1 > nbAlloc2) {
size_t nbAlloc1 = e1->allocations;
size_t nbAlloc2 = e2->allocations;
size_t size1 = e1->size & ~SIZE_FLAG_MASK;
size_t size2 = e2->size & ~SIZE_FLAG_MASK;
size_t alloc1 = nbAlloc1 * size1;
size_t alloc2 = nbAlloc2 * size2;
// sort in descending order by:
// 1) total size
// 2) number of allocations
//
// This is used for sorting, not determination of equality, so we don't
// need to compare the bit flags.
int result;
if (alloc1 > alloc2) {
result = -1;
} else if (nbAlloc1 < nbAlloc2) {
} else if (alloc1 < alloc2) {
result = 1;
} else {
result = 0;
if (nbAlloc1 > nbAlloc2) {
result = -1;
} else if (nbAlloc1 < nbAlloc2) {
result = 1;
} else {
result = 0;
}
}
}
return result;
@ -149,7 +158,7 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
if (*info == NULL) {
*overallSize = 0;
goto done;
goto out_nomem_info;
}
qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
@ -161,8 +170,7 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
if (entrySize < *infoSize) {
/* we're writing less than a full entry, clear out the rest */
/* TODO: only clear out the part we're not overwriting? */
memset(head, 0, *infoSize);
memset(head + entrySize, 0, *infoSize - entrySize);
} else {
/* make sure the amount we're copying doesn't exceed the limit */
entrySize = *infoSize;
@ -171,6 +179,7 @@ void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
head += *infoSize;
}
out_nomem_info:
dlfree(list);
done:


@ -149,6 +149,8 @@ static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_
} else {
// create a new entry
entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
if (!entry)
return NULL;
entry->allocations = 1;
entry->slot = slot;
entry->prev = NULL;


@ -0,0 +1,347 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "pthread_internal.h"
#include <errno.h>
/* Technical note:
*
* Possible states of a read/write lock:
*
* - no readers and no writer (unlocked)
* - one or more readers sharing the lock at the same time (read-locked)
* - one writer holding the lock (write-lock)
*
* Additionally:
* - trying to get the write-lock while there are any readers blocks
* - trying to get the read-lock while there is a writer blocks
* - a single thread can acquire the lock multiple times in the same mode
*
* - Posix states that behaviour is undefined if a thread tries to acquire
* the lock in two distinct modes (e.g. write after read, or read after write).
*
* - This implementation tries to avoid writer starvation by making the readers
* block as soon as there is a waiting writer on the lock. However, it cannot
* completely eliminate it: each time the lock is unlocked, all waiting threads
* are woken and battle for it; which one gets it depends on the kernel scheduler
* and is semi-random.
*
*/
#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
#define RWLOCKATTR_DEFAULT 0
#define RWLOCKATTR_SHARED_MASK 0x0010
extern pthread_internal_t* __get_thread(void);
/* Return a global kernel ID for the current thread */
static int __get_thread_id(void)
{
return __get_thread()->kernel_id;
}
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
if (!attr)
return EINVAL;
*attr = PTHREAD_PROCESS_PRIVATE;
return 0;
}
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
if (!attr)
return EINVAL;
*attr = -1;
return 0;
}
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
if (!attr)
return EINVAL;
switch (pshared) {
case PTHREAD_PROCESS_PRIVATE:
case PTHREAD_PROCESS_SHARED:
*attr = pshared;
return 0;
default:
return EINVAL;
}
}
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared)
{
if (!attr || !pshared)
return EINVAL;
*pshared = *attr;
return 0;
}
int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
pthread_mutexattr_t* lock_attr = NULL;
pthread_condattr_t* cond_attr = NULL;
pthread_mutexattr_t lock_attr0;
pthread_condattr_t cond_attr0;
int ret;
if (rwlock == NULL)
return EINVAL;
if (attr && *attr == PTHREAD_PROCESS_SHARED) {
lock_attr = &lock_attr0;
pthread_mutexattr_init(lock_attr);
pthread_mutexattr_setpshared(lock_attr, PTHREAD_PROCESS_SHARED);
cond_attr = &cond_attr0;
pthread_condattr_init(cond_attr);
pthread_condattr_setpshared(cond_attr, PTHREAD_PROCESS_SHARED);
}
ret = pthread_mutex_init(&rwlock->lock, lock_attr);
if (ret != 0)
return ret;
ret = pthread_cond_init(&rwlock->cond, cond_attr);
if (ret != 0) {
pthread_mutex_destroy(&rwlock->lock);
return ret;
}
rwlock->numLocks = 0;
rwlock->pendingReaders = 0;
rwlock->pendingWriters = 0;
rwlock->writerThreadId = 0;
return 0;
}
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
int ret;
if (rwlock == NULL)
return EINVAL;
if (rwlock->numLocks > 0)
return EBUSY;
pthread_cond_destroy(&rwlock->cond);
pthread_mutex_destroy(&rwlock->lock);
return 0;
}
/* Returns TRUE iff we can acquire a read lock. */
static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id)
{
/* We can't have the lock if any writer is waiting for it (writer bias).
* This tries to avoid starvation when there are multiple readers racing.
*/
if (rwlock->pendingWriters > 0)
return 0;
/* We can have the lock if there is no writer, or if we write-own it */
/* The second test avoids a self-dead lock in case of buggy code. */
if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == thread_id)
return 1;
/* Otherwise, we can't have it */
return 0;
}
/* returns TRUE iff we can acquire a write lock. */
static __inline__ int write_precondition(pthread_rwlock_t *rwlock, int thread_id)
{
/* We can get the lock if nobody has it */
if (rwlock->numLocks == 0)
return 1;
/* Or if we already own it */
if (rwlock->writerThreadId == thread_id)
return 1;
/* Otherwise, not */
return 0;
}
/* This function is used to wake any waiting thread contending
* for the lock. One of them should be able to grab it after
* that.
*/
static void _pthread_rwlock_pulse(pthread_rwlock_t *rwlock)
{
if (rwlock->pendingReaders > 0 || rwlock->pendingWriters > 0)
pthread_cond_broadcast(&rwlock->cond);
}
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
return pthread_rwlock_timedrdlock(rwlock, NULL);
}
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
if (__unlikely(!read_precondition(rwlock, __get_thread_id())))
ret = EBUSY;
else
rwlock->numLocks ++;
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
int thread_id, ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!read_precondition(rwlock, thread_id))) {
rwlock->pendingReaders += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !read_precondition(rwlock, thread_id));
rwlock->pendingReaders -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
return pthread_rwlock_timedwrlock(rwlock, NULL);
}
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
int thread_id, ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!write_precondition(rwlock, thread_id))) {
ret = EBUSY;
} else {
rwlock->numLocks ++;
rwlock->writerThreadId = thread_id;
}
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
int thread_id, ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
thread_id = __get_thread_id();
if (__unlikely(!write_precondition(rwlock, thread_id))) {
/* If we can't write yet, wait until the rwlock is unlocked
* and try again. Increment pendingWriters to get the
* cond broadcast when that happens.
*/
rwlock->pendingWriters += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
} while (ret == 0 && !write_precondition(rwlock, thread_id));
rwlock->pendingWriters -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
rwlock->writerThreadId = thread_id;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
/* The lock must be held */
if (rwlock->numLocks == 0) {
ret = EPERM;
goto EXIT;
}
/* If it has only readers, writerThreadId is 0 */
if (rwlock->writerThreadId == 0) {
if (--rwlock->numLocks == 0)
_pthread_rwlock_pulse(rwlock);
}
/* Otherwise, it has only a single writer, which
* must be ourselves.
*/
else {
if (rwlock->writerThreadId != __get_thread_id()) {
ret = EPERM;
goto EXIT;
}
if (--rwlock->numLocks == 0) {
rwlock->writerThreadId = 0;
_pthread_rwlock_pulse(rwlock);
}
}
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
}
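The read/write lock API implemented above can be exercised with a short sketch like the following (illustrative only, not part of the change; error checking is omitted and the reader/writer bodies are placeholders):
#include <pthread.h>
#include <stdio.h>
static pthread_rwlock_t g_lock;
static int g_counter;
static void* reader(void* arg)
{
    pthread_rwlock_rdlock(&g_lock);    /* any number of readers may hold this at once */
    printf("counter = %d\n", g_counter);
    pthread_rwlock_unlock(&g_lock);
    return NULL;
}
static void* writer(void* arg)
{
    pthread_rwlock_wrlock(&g_lock);    /* exclusive: new readers block while a writer waits or holds it */
    g_counter++;
    pthread_rwlock_unlock(&g_lock);
    return NULL;
}
int main(void)
{
    pthread_t r, w;
    pthread_rwlock_init(&g_lock, NULL);    /* NULL attr => process-private */
    pthread_create(&w, NULL, writer, NULL);
    pthread_create(&r, NULL, reader, NULL);
    pthread_join(w, NULL);
    pthread_join(r, NULL);
    pthread_rwlock_destroy(&g_lock);
    return 0;
}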


@ -43,14 +43,28 @@
#include <memory.h>
#include <assert.h>
#include <malloc.h>
#include <linux/futex.h>
#include <cutils/atomic-inline.h>
#include <bionic_futex.h>
#include <bionic_atomic_inline.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int __set_errno(int);
int __futex_wake_ex(volatile void *ftx, int pshared, int val)
{
return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
}
int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
{
return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
}
#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
@ -713,24 +727,6 @@ int pthread_setschedparam(pthread_t thid, int policy,
}
int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);
int __futex_syscall3(volatile void *ftx, int op, int val);
int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);
#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif
#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#endif
#ifndef FUTEX_WAKE_PRIVATE
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#endif
// mutex lock states
//
// 0: unlocked
@ -884,8 +880,13 @@ int pthread_mutex_init(pthread_mutex_t *mutex,
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
if (__unlikely(mutex == NULL))
return EINVAL;
int ret;
/* use trylock to ensure that the mutex value is
* valid and is not already locked. */
ret = pthread_mutex_trylock(mutex);
if (ret != 0)
return ret;
mutex->value = 0xdead10cc;
return 0;
@ -932,10 +933,8 @@ _normal_lock(pthread_mutex_t* mutex)
* that the mutex is in state 2 when we go to sleep on it, which
* guarantees a wake-up call.
*/
int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
__futex_syscall4(&mutex->value, wait_op, shared|2, 0);
__futex_wait_ex(&mutex->value, shared, shared|2, 0);
}
ANDROID_MEMBAR_FULL();
}
@ -958,7 +957,6 @@ _normal_unlock(pthread_mutex_t* mutex)
* if it wasn't 1 we have to do some additional work.
*/
if (__atomic_dec(&mutex->value) != (shared|1)) {
int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
/*
* Start by releasing the lock. The decrement changed it from
* "contended lock" to "uncontended lock", which means we still
@ -996,7 +994,7 @@ _normal_unlock(pthread_mutex_t* mutex)
* Either way we have correct behavior and nobody is orphaned on
* the wait queue.
*/
__futex_syscall3(&mutex->value, wake_op, 1);
__futex_wake_ex(&mutex->value, shared, 1);
}
}
@ -1016,7 +1014,7 @@ _recursive_unlock(void)
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
int mtype, tid, new_lock_type, shared, wait_op;
int mtype, tid, new_lock_type, shared;
if (__unlikely(mutex == NULL))
return EINVAL;
@ -1061,8 +1059,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
new_lock_type = 1;
/* compute futex wait opcode and restore shared flag in mtype */
wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
mtype |= shared;
mtype |= shared;
for (;;) {
int oldv;
@ -1088,7 +1085,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
*/
new_lock_type = 2;
__futex_syscall4(&mutex->value, wait_op, oldv, NULL);
__futex_wait_ex(&mutex->value, shared, oldv, NULL);
}
return 0;
}
@ -1128,8 +1125,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
/* Wake one waiting thread, if any */
if ((oldv & 3) == 2) {
int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
__futex_syscall3(&mutex->value, wake_op, 1);
__futex_wake_ex(&mutex->value, shared, 1);
}
return 0;
}
@ -1231,7 +1227,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
clockid_t clock = CLOCK_MONOTONIC;
struct timespec abstime;
struct timespec ts;
int mtype, tid, oldv, new_lock_type, shared, wait_op;
int mtype, tid, oldv, new_lock_type, shared;
/* compute absolute expiration time */
__timespec_to_relative_msec(&abstime, msecs, clock);
@ -1245,8 +1241,6 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
/* Handle common case first */
if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
{
int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
/* fast path for uncontended lock */
if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
ANDROID_MEMBAR_FULL();
@ -1258,7 +1252,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
return EBUSY;
__futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
__futex_wait_ex(&mutex->value, shared, shared|2, &ts);
}
ANDROID_MEMBAR_FULL();
return 0;
@ -1291,7 +1285,6 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
new_lock_type = 1;
/* Compute wait op and restore sharing bit in mtype */
wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
mtype |= shared;
for (;;) {
@ -1322,7 +1315,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
return EBUSY;
__futex_syscall4(&mutex->value, wait_op, oldv, &ts);
__futex_wait_ex(&mutex->value, shared, oldv, &ts);
}
return 0;
}
@ -1415,7 +1408,6 @@ static int
__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
long flags;
int wake_op;
if (__unlikely(cond == NULL))
return EINVAL;
@ -1429,8 +1421,7 @@ __pthread_cond_pulse(pthread_cond_t *cond, int counter)
break;
}
wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
__futex_syscall3(&cond->value, wake_op, counter);
__futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
return 0;
}
@ -1455,10 +1446,9 @@ int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
{
int status;
int oldvalue = cond->value;
int wait_op = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
pthread_mutex_unlock(mutex);
status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
pthread_mutex_lock(mutex);
if (status == (-ETIMEDOUT)) return ETIMEDOUT;
@ -1888,3 +1878,54 @@ int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
}
return 0;
}
/* This value is not exported by kernel headers, so hardcode it here */
#define MAX_TASK_COMM_LEN 16
#define TASK_COMM_FMT "/proc/self/task/%u/comm"
int pthread_setname_np(pthread_t thid, const char *thname)
{
size_t thname_len;
int saved_errno, ret;
if (thid == 0 || thname == NULL)
return EINVAL;
thname_len = strlen(thname);
if (thname_len >= MAX_TASK_COMM_LEN)
return ERANGE;
saved_errno = errno;
if (thid == pthread_self())
{
ret = prctl(PR_SET_NAME, (unsigned long)thname, 0, 0, 0) ? errno : 0;
}
else
{
/* Have to change another thread's name */
pthread_internal_t *thread = (pthread_internal_t *)thid;
char comm_name[sizeof(TASK_COMM_FMT) + 8];
ssize_t n;
int fd;
snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int)thread->kernel_id);
fd = open(comm_name, O_RDWR);
if (fd == -1)
{
ret = errno;
goto exit;
}
n = TEMP_FAILURE_RETRY(write(fd, thname, thname_len));
close(fd);
if (n < 0)
ret = errno;
else if ((size_t)n != thname_len)
ret = EIO;
else
ret = 0;
}
exit:
errno = saved_errno;
return ret;
}
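A minimal way to exercise the new pthread_setname_np() (a sketch, not part of the change; the thread body is a placeholder and the name must fit in MAX_TASK_COMM_LEN, i.e. at most 15 characters plus the terminating NUL):
#include <pthread.h>
#include <unistd.h>
static void* worker(void* arg)
{
    sleep(1);    /* placeholder work so the name can be observed, e.g. in /proc */
    return NULL;
}
int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_setname_np(t, "worker-0");    /* 8 characters, well under the 16-byte limit */
    pthread_join(t, NULL);
    return 0;
}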


@ -30,7 +30,62 @@
#include <sys/time.h>
#include <sys/atomics.h>
#include <time.h>
#include <cutils/atomic-inline.h>
#include <bionic_atomic_inline.h>
#include <bionic_futex.h>
#include <limits.h>
/* In this implementation, a semaphore contains a
* 31-bit signed value and a 1-bit 'shared' flag
* (for process-sharing purpose).
*
* We use the value -1 to indicate contention on the
* semaphore, 0 or more to indicate uncontended state,
* any value lower than -2 is invalid at runtime.
*
* State diagram:
*
* post(1) ==> 2
* post(0) ==> 1
* post(-1) ==> 1, then wake all waiters
*
* wait(2) ==> 1
* wait(1) ==> 0
* wait(0) ==> -1 then wait for a wake up + loop
* wait(-1) ==> -1 then wait for a wake up + loop
*
*/
/* Use the upper 31-bits for the counter, and the lower one
* for the shared flag.
*/
#define SEMCOUNT_SHARED_MASK 0x00000001
#define SEMCOUNT_VALUE_MASK 0xfffffffe
#define SEMCOUNT_VALUE_SHIFT 1
/* Maximum unsigned value that can be stored in the semaphore.
* One bit is used for the shared flag, another one for the
* sign bit, leaving us with only 30 bits.
*/
#define SEM_MAX_VALUE 0x3fffffff
/* convert a value into the corresponding sem->count bit pattern */
#define SEMCOUNT_FROM_VALUE(val) (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)
/* convert a sem->count bit pattern into the corresponding signed value */
#define SEMCOUNT_TO_VALUE(sval) ((int)(sval) >> SEMCOUNT_VALUE_SHIFT)
/* the value +1 as a sem->count bit-pattern. */
#define SEMCOUNT_ONE SEMCOUNT_FROM_VALUE(1)
/* the value -1 as a sem->count bit-pattern. */
#define SEMCOUNT_MINUS_ONE SEMCOUNT_FROM_VALUE(-1)
#define SEMCOUNT_DECREMENT(sval) (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_INCREMENT(sval) (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
/* return the shared bitflag from a semaphore */
#define SEM_GET_SHARED(sem) ((sem)->count & SEMCOUNT_SHARED_MASK)
int sem_init(sem_t *sem, int pshared, unsigned int value)
{
@ -39,26 +94,34 @@ int sem_init(sem_t *sem, int pshared, unsigned int value)
return -1;
}
if (pshared != 0) {
errno = ENOSYS;
/* ensure that 'value' can be stored in the semaphore */
if (value > SEM_MAX_VALUE) {
errno = EINVAL;
return -1;
}
sem->count = value;
sem->count = SEMCOUNT_FROM_VALUE(value);
if (pshared != 0)
sem->count |= SEMCOUNT_SHARED_MASK;
return 0;
}
int sem_destroy(sem_t *sem)
{
int count;
if (sem == NULL) {
errno = EINVAL;
return -1;
}
if (sem->count == 0) {
count = SEMCOUNT_TO_VALUE(sem->count);
if (count < 0) {
errno = EBUSY;
return -1;
}
sem->count = 0;
return 0;
}
@ -91,32 +154,111 @@ int sem_unlink(const char * name)
}
/* Decrement a semaphore's value atomically,
* and return the old one. As a special case,
* this returns immediately if the value is
* negative (i.e. -1)
*/
static int
__atomic_dec_if_positive( volatile unsigned int* pvalue )
__sem_dec(volatile unsigned int *pvalue)
{
unsigned int old;
unsigned int shared = (*pvalue & SEMCOUNT_SHARED_MASK);
unsigned int old, new;
int ret;
do {
old = *pvalue;
}
while ( old != 0 && __atomic_cmpxchg( (int)old, (int)old-1, (volatile int*)pvalue ) != 0 );
old = (*pvalue & SEMCOUNT_VALUE_MASK);
ret = SEMCOUNT_TO_VALUE(old);
if (ret < 0)
break;
return old;
new = SEMCOUNT_DECREMENT(old);
}
while (__atomic_cmpxchg((int)(old|shared),
(int)(new|shared),
(volatile int *)pvalue) != 0);
return ret;
}
/* Same as __sem_dec, but will not touch anything if the
* value is already negative *or* 0. Returns the old value.
*/
static int
__sem_trydec(volatile unsigned int *pvalue)
{
unsigned int shared = (*pvalue & SEMCOUNT_SHARED_MASK);
unsigned int old, new;
int ret;
do {
old = (*pvalue & SEMCOUNT_VALUE_MASK);
ret = SEMCOUNT_TO_VALUE(old);
if (ret <= 0)
break;
new = SEMCOUNT_DECREMENT(old);
}
while (__atomic_cmpxchg((int)(old|shared),
(int)(new|shared),
(volatile int *)pvalue) != 0);
return ret;
}
/* "Increment" the value of a semaphore atomically and
* return its old value. Note that this implements
* the special case of "incrementing" any negative
* value to +1 directly.
*
* NOTE: The value will _not_ wrap above SEM_MAX_VALUE
*/
static int
__sem_inc(volatile unsigned int *pvalue)
{
unsigned int shared = (*pvalue & SEMCOUNT_SHARED_MASK);
unsigned int old, new;
int ret;
do {
old = (*pvalue & SEMCOUNT_VALUE_MASK);
ret = SEMCOUNT_TO_VALUE(old);
/* Can't go higher than SEM_MAX_VALUE */
if (ret == SEM_MAX_VALUE)
break;
/* If the counter is negative, go directly to +1,
* otherwise just increment */
if (ret < 0)
new = SEMCOUNT_ONE;
else
new = SEMCOUNT_INCREMENT(old);
}
while ( __atomic_cmpxchg((int)(old|shared),
(int)(new|shared),
(volatile int*)pvalue) != 0);
return ret;
}
/* lock a semaphore */
int sem_wait(sem_t *sem)
{
unsigned shared;
if (sem == NULL) {
errno = EINVAL;
return -1;
}
shared = SEM_GET_SHARED(sem);
for (;;) {
if (__atomic_dec_if_positive(&sem->count))
if (__sem_dec(&sem->count) > 0)
break;
__futex_wait(&sem->count, 0, 0);
__futex_wait_ex(&sem->count, shared, shared|SEMCOUNT_MINUS_ONE, NULL);
}
ANDROID_MEMBAR_FULL();
return 0;
@ -125,6 +267,7 @@ int sem_wait(sem_t *sem)
int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
{
int ret;
unsigned int shared;
if (sem == NULL) {
errno = EINVAL;
@ -132,13 +275,15 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
}
/* POSIX says we need to try to decrement the semaphore
* before checking the timeout value */
if (__atomic_dec_if_positive(&sem->count)) {
* before checking the timeout value. Note that if the
* value is currently 0, __sem_trydec() does nothing.
*/
if (__sem_trydec(&sem->count) > 0) {
ANDROID_MEMBAR_FULL();
return 0;
}
/* check it as per Posix */
/* Check it as per Posix */
if (abs_timeout == NULL ||
abs_timeout->tv_sec < 0 ||
abs_timeout->tv_nsec < 0 ||
@ -148,6 +293,8 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
return -1;
}
shared = SEM_GET_SHARED(sem);
for (;;) {
struct timespec ts;
int ret;
@ -166,31 +313,47 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
return -1;
}
ret = __futex_wait(&sem->count, 0, &ts);
/* Try to grab the semaphore. If the value was 0, this
* will also change it to -1 */
if (__sem_dec(&sem->count) > 0) {
ANDROID_MEMBAR_FULL();
break;
}
/* Contention detected. wait for a wakeup event */
ret = __futex_wait_ex(&sem->count, shared, shared|SEMCOUNT_MINUS_ONE, &ts);
/* return in case of timeout or interrupt */
if (ret == -ETIMEDOUT || ret == -EINTR) {
errno = -ret;
return -1;
}
if (__atomic_dec_if_positive(&sem->count)) {
ANDROID_MEMBAR_FULL();
break;
}
}
return 0;
}
/* unlock a semaphore */
/* Unlock a semaphore */
int sem_post(sem_t *sem)
{
unsigned int shared;
int old;
if (sem == NULL)
return EINVAL;
shared = SEM_GET_SHARED(sem);
ANDROID_MEMBAR_FULL();
if (__atomic_inc((volatile int*)&sem->count) >= 0)
__futex_wake(&sem->count, 1);
old = __sem_inc(&sem->count);
if (old < 0) {
/* contention on the semaphore, wake up all waiters */
__futex_wake_ex(&sem->count, shared, INT_MAX);
}
else if (old == SEM_MAX_VALUE) {
/* overflow detected */
errno = EOVERFLOW;
return -1;
}
return 0;
}
@ -202,7 +365,7 @@ int sem_trywait(sem_t *sem)
return -1;
}
if (__atomic_dec_if_positive(&sem->count) > 0) {
if (__sem_trydec(&sem->count) > 0) {
ANDROID_MEMBAR_FULL();
return 0;
} else {
@ -211,13 +374,29 @@ int sem_trywait(sem_t *sem)
}
}
/* Note that Posix requires that sem_getvalue() returns, in
* case of contention, the negative of the number of waiting
* threads.
*
* However, code that depends on this negative value to be
* meaningful is most probably racy. The GLibc sem_getvalue()
* only returns the semaphore value, which is 0, in case of
* contention, so we will mimick this behaviour here instead
* for better compatibility.
*/
int sem_getvalue(sem_t *sem, int *sval)
{
int val;
if (sem == NULL || sval == NULL) {
errno = EINVAL;
return -1;
}
*sval = sem->count;
val = SEMCOUNT_TO_VALUE(sem->count);
if (val < 0)
val = 0;
*sval = val;
return 0;
}
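A sketch of the public behaviour after this change (illustrative only; a process-shared semaphore would instead pass a non-zero 'pshared' and live in memory mapped into both processes):
#include <semaphore.h>
#include <stdio.h>
int main(void)
{
    sem_t sem;
    int value;
    if (sem_init(&sem, 0 /* pshared */, 2) < 0)    /* two initial "slots" */
        return 1;
    sem_wait(&sem);    /* 2 -> 1 */
    sem_wait(&sem);    /* 1 -> 0 */
    sem_post(&sem);    /* 0 -> 1, would wake all waiters if contended */
    sem_getvalue(&sem, &value);
    printf("value = %d\n", value);    /* 1; a contended semaphore reports 0, as noted above */
    sem_destroy(&sem);
    return 0;
}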


@ -126,7 +126,7 @@ int __system_property_read(const prop_info *pi, char *name, char *value)
for(;;) {
serial = pi->serial;
while(SERIAL_DIRTY(serial)) {
__futex_wait(&pi->serial, serial, 0);
__futex_wait((volatile void *)&pi->serial, serial, 0);
serial = pi->serial;
}
len = SERIAL_VALUE_LEN(serial);
@ -164,7 +164,7 @@ int __system_property_wait(const prop_info *pi)
} else {
n = pi->serial;
do {
__futex_wait(&pi->serial, n, 0);
__futex_wait((volatile void *)&pi->serial, n, 0);
} while(n == pi->serial);
}
return 0;


@ -1,7 +1,82 @@
Bionic ChangeLog:
-----------------
Differences between current and Android 2.1:
Differences between current and Android 2.2:
- <pthread.h>: Add reader/writer locks implementation. Add sanity
checking to pthread_mutex_destroy() (e.g. a locked mutex will return
EBUSY).
- <semaphore.h>: Use private futexes for semaphore implementation,
unless you set 'pshared' to non-0 when calling sem_init().
Also fixed a bug in sem_post() to make it wake up all waiting
threads, instead of one. As a consequence, the maximum semaphore
value is now reduced to 0x3fffffff.
- <math.h>: Added sincos(), sincosf() and sincosl() (GLibc compatibility).
- <sys/sysinfo.h>: Added missing sysinfo() system call implementation
(the function was already declared in the header though).
- sysconf() didn't work for some arguments due to a small bug in the
/proc line parser.
- <termio.h>: added missing header (just includes <termios.h>)
- <unistd.h>: add missing declaration for truncate(). The implementation
was already here since Android 1.5.
modify implementation of alarm() to return 0 in case of error (i.e.
if a value larger than 0x7fffffff seconds is passed to it). This
makes the implementation compliant with the GLibc behaviour.
- <wchar.h>: small fixes to really support wchar_t in Bionic (not there yet).
the size of wchar_t is still 32-bit (decided by the compiler)
WCHAR_MIN: changed from 0 to INT_MIN
WCHAR_MAX: changed from 255 to INT_MAX
wcpcpy(), wcpncpy(), wcscat(), wcschr(), wcscmp(),
wcscpy(), wcscspn(), wcsdup(), wcslcat(), wcslcpy(),
wcslen(), wcsncat(), wcsncmp(), wcsncpy(), wcsnlen(),
wcspbrk(), wcsrchr(), wcsrchr(), wcsspn(), wcsstr(),
wcstok(), wcswidth(), wmemchr(), wmemcmp(), wmemcpy(),
wmemmove(), wmemset(): Added proper implementations.
wcscasecmp(), wcsncasecmp(): Added implementation limited
to ASCII codes for lower/upper.
wcscoll(): added dummy implementation that calls wcscmp()
wcsxfrm(): added dummy implementation that calls wcsncpy()
NOTE: Technically, this breaks the ABI, but we never claimed to support
wchar_t anyway. The wchar_t support is still *NOT* official at this
point. We need better multi-byte support code, and wprintf/wscanf
stuff too.
- <inttypes.h>: add missing declarations for strntoimax and strntoumax.
- <stdlib.h>: add missing declarations for drand48() and erand48().
- clearerr(): fix broken implementation.
- Feature test macros like _POSIX_C_SOURCE / _XOPEN_SOURCE / _C99_SOURCE
are now handled correctly by our C library headers (see <sys/cdefs.h>)
- <sys/select.h>: add missing declaration for pselect()
- <sys/vfs.h>: fixed implementation of fstatfs() (also fixes fpathconf()
which uses it).
- <dlfcn.h>: fixed dlopen() implementation to support dlopen(NULL, ...).
This allows one to look at the dynamic symbols exported by an executable.
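The wide-character functions listed above can be exercised with a small sketch (illustrative only; remember that wcscasecmp()/wcsncasecmp() only fold ASCII):
#include <wchar.h>
#include <stdio.h>
int main(void)
{
    wchar_t buf[16];
    wcscpy(buf, L"Bionic");
    wcscat(buf, L" libc");
    printf("len = %zu\n", wcslen(buf));                              /* 11 */
    printf("equal = %d\n", wcscasecmp(buf, L"BIONIC LIBC") == 0);    /* 1 (ASCII-only case folding) */
    return 0;
}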
-------------------------------------------------------------------------------
Differences between Android 2.2 and Android 2.1:
- Support FP register save/load in setjmp()/longjmp() on ARMv7 builds.
- Add support for SH-4 CPU architecture !


@ -253,6 +253,9 @@ intmax_t imaxabs(intmax_t);
imaxdiv_t imaxdiv(intmax_t, intmax_t);
intmax_t strtoimax(const char *, char **, int);
uintmax_t strtoumax(const char *, char **, int);
intmax_t strntoimax(const char *nptr, char **endptr, int base, size_t n);
uintmax_t strntoumax(const char *nptr, char **endptr, int base, size_t n);
__END_DECLS
#endif /* _INTTYPES_H_ */


@ -86,7 +86,7 @@
#include <sys/limits.h>
#if __POSIX_VISIBLE
#include <arch/syslimits.h>
#include <sys/syslimits.h>
#endif
#ifndef PAGESIZE


@ -219,6 +219,41 @@ int pthread_cond_timeout_np(pthread_cond_t *cond,
*/
int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs);
/* read-write lock support */
typedef int pthread_rwlockattr_t;
typedef struct {
pthread_mutex_t lock;
pthread_cond_t cond;
int numLocks;
int writerThreadId;
int pendingReaders;
int pendingWriters;
void* reserved[4]; /* for future extensibility */
} pthread_rwlock_t;
#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, NULL, 0, 0 }
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr);
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr);
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared);
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared);
int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout);
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout);
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *));
int pthread_key_delete (pthread_key_t);
int pthread_setspecific(pthread_key_t key, const void *value);
@ -231,6 +266,8 @@ int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid);
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void));
int pthread_setname_np(pthread_t thid, const char *thname);
typedef void (*__pthread_cleanup_func_t)(void*);
typedef struct __pthread_cleanup_t {


@ -51,6 +51,7 @@ typedef int sig_atomic_t;
#endif
extern const char * const sys_siglist[];
extern const char * const sys_signame[];
static __inline__ int sigismember(sigset_t *set, int signum)
{


@ -305,7 +305,7 @@ char *cuserid(char *);
FILE *fdopen(int, const char *);
int fileno(FILE *);
#if (__POSIX_VISIBLE >= 199209) || 1 /* ANDROID: Bionic does include this */
#if (__POSIX_VISIBLE >= 199209)
int pclose(FILE *);
FILE *popen(const char *, const char *);
#endif


@ -107,6 +107,8 @@ extern long mrand48(void);
extern long nrand48(unsigned short *);
extern long lrand48(void);
extern unsigned short *seed48(unsigned short*);
extern double erand48(unsigned short xsubi[3]);
extern double drand48(void);
extern void srand48(long);
extern unsigned int arc4random(void);
extern void arc4random_stir(void);
@ -135,7 +137,7 @@ extern char* ptsname(int);
extern int ptsname_r(int, char*, size_t);
extern int getpt(void);
static __inline__ int grantpt(int __fd)
static __inline__ int grantpt(int __fd __attribute((unused)))
{
(void)__fd;
return 0; /* devpts does this all for us! */


@ -37,12 +37,6 @@
#ifndef _SYS_CDEFS_H_
#define _SYS_CDEFS_H_
/* our implementation of wchar_t is only 8-bit - die die non-portable code */
#undef __WCHAR_TYPE__
#define __WCHAR_TYPE__ unsigned char
/*
* Macro to test if we're using a GNU C compiler of a specific vintage
* or later, for e.g. features that appeared in a particular version
@ -62,11 +56,6 @@
#define __GNUC_PREREQ__(x, y) 0
#endif
//XXX #include <machine/cdefs.h>
/* BIONIC: simpler definition */
#define __BSD_VISIBLE 1
#include <sys/cdefs_elf.h>
#if defined(__cplusplus)
@ -371,6 +360,142 @@
#define __link_set_entry(set, idx) (__link_set_begin(set)[idx])
/*
* Some of the recent FreeBSD sources used in Bionic need this.
* Originally, this was used to embed the RCS versions of each source file
* in the generated binary. We certainly don't want this in Bionic.
*/
#define __FBSDID(s) struct __hack
/*-
* The following definitions are an extension of the behavior originally
* implemented in <sys/_posix.h>, but with a different level of granularity.
* POSIX.1 requires that the macros we test be defined before any standard
* header file is included.
*
* Here's a quick run-down of the versions:
* defined(_POSIX_SOURCE) 1003.1-1988
* _POSIX_C_SOURCE == 1 1003.1-1990
* _POSIX_C_SOURCE == 2 1003.2-1992 C Language Binding Option
* _POSIX_C_SOURCE == 199309 1003.1b-1993
* _POSIX_C_SOURCE == 199506 1003.1c-1995, 1003.1i-1995,
* and the omnibus ISO/IEC 9945-1: 1996
* _POSIX_C_SOURCE == 200112 1003.1-2001
* _POSIX_C_SOURCE == 200809 1003.1-2008
*
* In addition, the X/Open Portability Guide, which is now the Single UNIX
* Specification, defines a feature-test macro which indicates the version of
* that specification, and which subsumes _POSIX_C_SOURCE.
*
* Our macros begin with two underscores to avoid namespace screwage.
*/
/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
#undef _POSIX_C_SOURCE /* Probably illegal, but beyond caring now. */
#define _POSIX_C_SOURCE 199009
#endif
/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 199209
#endif
/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
#ifdef _XOPEN_SOURCE
#if _XOPEN_SOURCE - 0 >= 700
#define __XSI_VISIBLE 700
#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809
#elif _XOPEN_SOURCE - 0 >= 600
#define __XSI_VISIBLE 600
#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200112
#elif _XOPEN_SOURCE - 0 >= 500
#define __XSI_VISIBLE 500
#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 199506
#endif
#endif
/*
* Deal with all versions of POSIX. The ordering relative to the tests above is
* important.
*/
#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
#define _POSIX_C_SOURCE 198808
#endif
#ifdef _POSIX_C_SOURCE
#if _POSIX_C_SOURCE >= 200809
#define __POSIX_VISIBLE 200809
#define __ISO_C_VISIBLE 1999
#elif _POSIX_C_SOURCE >= 200112
#define __POSIX_VISIBLE 200112
#define __ISO_C_VISIBLE 1999
#elif _POSIX_C_SOURCE >= 199506
#define __POSIX_VISIBLE 199506
#define __ISO_C_VISIBLE 1990
#elif _POSIX_C_SOURCE >= 199309
#define __POSIX_VISIBLE 199309
#define __ISO_C_VISIBLE 1990
#elif _POSIX_C_SOURCE >= 199209
#define __POSIX_VISIBLE 199209
#define __ISO_C_VISIBLE 1990
#elif _POSIX_C_SOURCE >= 199009
#define __POSIX_VISIBLE 199009
#define __ISO_C_VISIBLE 1990
#else
#define __POSIX_VISIBLE 198808
#define __ISO_C_VISIBLE 0
#endif /* _POSIX_C_SOURCE */
#else
/*-
* Deal with _ANSI_SOURCE:
* If it is defined, and no other compilation environment is explicitly
* requested, then define our internal feature-test macros to zero. This
* makes no difference to the preprocessor (undefined symbols in preprocessing
* expressions are defined to have value zero), but makes it more convenient for
* a test program to print out the values.
*
* If a program mistakenly defines _ANSI_SOURCE and some other macro such as
* _POSIX_C_SOURCE, we will assume that it wants the broader compilation
* environment (and in fact we will never get here).
*/
#if defined(_ANSI_SOURCE) /* Hide almost everything. */
#define __POSIX_VISIBLE 0
#define __XSI_VISIBLE 0
#define __BSD_VISIBLE 0
#define __ISO_C_VISIBLE 1990
#elif defined(_C99_SOURCE) /* Localism to specify strict C99 env. */
#define __POSIX_VISIBLE 0
#define __XSI_VISIBLE 0
#define __BSD_VISIBLE 0
#define __ISO_C_VISIBLE 1999
#else /* Default environment: show everything. */
#define __POSIX_VISIBLE 200809
#define __XSI_VISIBLE 700
#define __BSD_VISIBLE 1
#define __ISO_C_VISIBLE 1999
#endif
#endif
/*
* Default values.
*/
#ifndef __XPG_VISIBLE
# define __XPG_VISIBLE 700
#endif
#ifndef __POSIX_VISIBLE
# define __POSIX_VISIBLE 200809
#endif
#ifndef __ISO_C_VISIBLE
# define __ISO_C_VISIBLE 1999
#endif
#ifndef __BSD_VISIBLE
# define __BSD_VISIBLE 1
#endif
#define __BIONIC__ 1
#endif /* !_SYS_CDEFS_H_ */
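The cascade above can be checked with a tiny translation unit (a sketch; the macro must be defined before any standard header, and the resulting values follow the table in the comment):
#define _POSIX_C_SOURCE 200112    /* request the 1003.1-2001 environment */
#include <sys/cdefs.h>
#include <stdio.h>
int main(void)
{
#if __POSIX_VISIBLE >= 200112 && __ISO_C_VISIBLE >= 1999
    printf("POSIX.1-2001 and C99 interfaces are visible\n");
#else
    printf("restricted environment\n");
#endif
    return 0;
}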


@ -127,6 +127,7 @@
#define __NR_init_module (__NR_SYSCALL_BASE + 128)
#define __NR_delete_module (__NR_SYSCALL_BASE + 129)
#define __NR_syslog (__NR_SYSCALL_BASE + 103)
#define __NR_sysinfo (__NR_SYSCALL_BASE + 116)
#define __NR_futex (__NR_SYSCALL_BASE + 240)
#define __NR_poll (__NR_SYSCALL_BASE + 168)


@ -87,7 +87,7 @@ int fsync (int);
int fchown (int, uid_t, gid_t);
void sync (void);
int __fcntl64 (int, int, void *);
int fstatfs (int, size_t, struct statfs *);
int __fstatfs64 (int, size_t, struct statfs *);
ssize_t sendfile (int out_fd, int in_fd, off_t *offset, size_t count);
int fstatat (int dirfd, const char *path, struct stat *buf, int flags);
int mkdirat (int dirfd, const char *pathname, mode_t mode);
@ -191,6 +191,7 @@ int __syslog (int, char *, int);
int init_module (void *, unsigned long, const char *);
int delete_module (const char*, unsigned int);
int klogctl (int, char *, int);
int sysinfo (struct sysinfo *);
int futex (void *, int, int, void *, void *, int);
int epoll_create (int size);
int epoll_ctl (int epfd, int op, int fd, struct epoll_event *event);


@ -31,12 +31,15 @@
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/types.h>
#include <signal.h>
__BEGIN_DECLS
typedef __kernel_fd_set fd_set;
extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
extern int pselect(int n, fd_set *readfds, fd_set *writefds, fd_set *errfds,
const struct timespec *timeout, const sigset_t *sigmask);
__END_DECLS


@ -62,8 +62,10 @@ typedef __kernel_ino_t ino_t;
typedef __kernel_key_t key_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
#ifndef _OFF_T_DEFINED_
#define _OFF_T_DEFINED_
typedef __kernel_off_t off_t;
#endif
typedef __kernel_loff_t loff_t;
typedef loff_t off64_t; /* GLibc-specific */

libc/include/termio.h (new file, 32 lines)

@ -0,0 +1,32 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* All definitions related to termio are in Linux kernel headers
* that are already included by <termios.h>
*/
#include <termios.h>


@ -124,6 +124,7 @@ extern int readlink(const char *, char *, size_t);
extern int chown(const char *, uid_t, gid_t);
extern int fchown(int, uid_t, gid_t);
extern int lchown(const char *, uid_t, gid_t);
extern int truncate(const char *, off_t);
extern char *getcwd(char *, size_t);
extern int sync(void);


@ -70,9 +70,9 @@ typedef enum {
WC_TYPE_MAX
} wctype_t;
#define WCHAR_MAX 255
#define WCHAR_MIN 0
#define WEOF (-1)
#define WCHAR_MAX INT_MAX
#define WCHAR_MIN INT_MIN
#define WEOF ((wint_t)(-1))
extern wint_t btowc(int);
extern int fwprintf(FILE *, const wchar_t *, ...);


@ -0,0 +1,14 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_ADC_H
#define __ASM_ADC_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ADDRSPACE_H
#define __ASM_SH_ADDRSPACE_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ATOMIC_IRQ_H
#define __ASM_SH_ATOMIC_IRQ_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H
#endif

View File

@ -0,0 +1,47 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#include <linux/compiler.h>
#include <asm/system.h>
#include <asm/atomic-llsc.h>
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
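
The composite macros above (inc/dec-and-test and friends) all bottom out in an add-and-return primitive; a userspace analogy using GCC's __sync builtins, not this header's kernel-only API:

typedef struct { volatile int counter; } my_atomic_t;

static int my_atomic_add_return(int i, my_atomic_t *v)
{
    return __sync_add_and_fetch(&v->counter, i);   /* returns the new value */
}

#define my_atomic_inc_and_test(v)    (my_atomic_add_return(1, (v)) == 0)
#define my_atomic_dec_and_test(v)    (my_atomic_add_return(-1, (v)) == 0)
#define my_atomic_add_negative(a, v) (my_atomic_add_return((a), (v)) < 0)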

View File

@ -0,0 +1,23 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_AUXVEC_H
#define __ASM_SH_AUXVEC_H
#define AT_FPUCW 18
#define AT_SYSINFO_EHDR 33
#define AT_L1I_CACHESHAPE 34
#define AT_L1D_CACHESHAPE 35
#define AT_L2_CACHESHAPE 36
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BITOPS_GRB_H
#define __ASM_SH_BITOPS_GRB_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BITOPS_IRQ_H
#define __ASM_SH_BITOPS_IRQ_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H
#endif

View File

@ -0,0 +1,30 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
#define TRAPA_BUG_OPCODE 0xc33e
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
#define _EMIT_BUG_ENTRY "\t.pushsection __bug_table,\"a\"\n" "2:\t.long 1b\n" "\t.short %O3\n" "\t.org 2b+%O4\n" "\t.popsection\n"
#define BUG() do { __asm__ __volatile__ ( "1:\t.short %O0\n" _EMIT_BUG_ENTRY : : "n" (TRAPA_BUG_OPCODE), "i" (__FILE__), "i" (__LINE__), "i" (0), "i" (sizeof(struct bug_entry))); } while (0)
#define __WARN() do { __asm__ __volatile__ ( "1:\t.short %O0\n" _EMIT_BUG_ENTRY : : "n" (TRAPA_BUG_OPCODE), "i" (__FILE__), "i" (__LINE__), "i" (BUGFLAG_WARNING), "i" (sizeof(struct bug_entry))); } while (0)
#define WARN_ON(x) ({ int __ret_warn_on = !!(x); if (__builtin_constant_p(__ret_warn_on)) { if (__ret_warn_on) __WARN(); } else { if (unlikely(__ret_warn_on)) __WARN(); } unlikely(__ret_warn_on); })
#include <asm-generic/bug.h>
#endif

View File

@ -0,0 +1,19 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BUGS_H
#define __ASM_SH_BUGS_H
#include <asm/processor.h>
#ifndef __LITTLE_ENDIAN__
#endif
#endif

View File

@ -0,0 +1,77 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_BYTEORDER_H
#define __ASM_SH_BYTEORDER_H
#include <linux/compiler.h>
#include <linux/types.h>
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
__asm__(
#ifdef __SH5__
"byterev %0, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %0, %0\n\t"
"swap.w %0, %0\n\t"
"swap.b %0, %0"
#endif
: "=r" (x)
: "0" (x));
return x;
}
static inline __attribute_const__ __u16 ___arch__swab16(__u16 x)
{
__asm__(
#ifdef __SH5__
"byterev %0, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %0, %0"
#endif
: "=r" (x)
: "0" (x));
return x;
}
static inline __u64 ___arch__swab64(__u64 val)
{
union {
struct { __u32 a,b; } s;
__u64 u;
} v, w;
v.u = val;
w.s.b = ___arch__swab32(v.s.a);
w.s.a = ___arch__swab32(v.s.b);
return w.u;
}
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#ifndef __STRICT_ANSI__
#define __BYTEORDER_HAS_U64__
#define __SWAB_64_THRU_32__
#endif
#ifdef __LITTLE_ENDIAN__
#include <linux/byteorder/little_endian.h>
#else
#include <linux/byteorder/big_endian.h>
#endif
#endif
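
For reference, ___arch__swab32() above is the SH-instruction form of the usual shift-and-mask byte swap; a portable equivalent that can be used to sanity-check it:

static unsigned int swab32_ref(unsigned int x)
{
    return  (x << 24)
          | ((x <<  8) & 0x00ff0000u)
          | ((x >>  8) & 0x0000ff00u)
          |  (x >> 24);
}
/* swab32_ref(0x11223344u) == 0x44332211u; the 64-bit variant above simply
 * swaps the two 32-bit halves via the union and byte-swaps each half. */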

View File

@ -0,0 +1,14 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H
#endif

View File

@ -0,0 +1,12 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#include "checksum_32.h"

View File

@ -0,0 +1,22 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
#include <linux/in6.h>
#ifdef __LITTLE_ENDIAN__
#else
#endif
#define _HAVE_ARCH_IPV6_CSUM
#define HAVE_CSUM_COPY_USER
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CHECKSUM_64_H
#define __ASM_SH_CHECKSUM_64_H
#endif

View File

@ -0,0 +1,70 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CLOCK_H
#define __ASM_SH_CLOCK_H
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <linux/err.h>
struct clk;
struct clk_ops {
void (*init)(struct clk *clk);
void (*enable)(struct clk *clk);
void (*disable)(struct clk *clk);
void (*recalc)(struct clk *clk);
int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id);
long (*round_rate)(struct clk *clk, unsigned long rate);
};
struct clk {
struct list_head node;
const char *name;
int id;
struct module *owner;
struct clk *parent;
struct clk_ops *ops;
struct kref kref;
unsigned long rate;
unsigned long flags;
unsigned long arch_flags;
};
#define CLK_ALWAYS_ENABLED (1 << 0)
#define CLK_RATE_PROPAGATES (1 << 1)
enum clk_sh_algo_id {
NO_CHANGE = 0,
IUS_N1_N1,
IUS_322,
IUS_522,
IUS_N11,
SB_N1,
SB3_N1,
SB3_32,
SB3_43,
SB3_54,
BP_N1,
IP_N1,
};
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CMPXCHG_GRB_H
#define __ASM_SH_CMPXCHG_GRB_H
#endif

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CMPXCHG_IRQ_H
#define __ASM_SH_CMPXCHG_IRQ_H
#endif

View File

@ -0,0 +1,25 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CPU_FEATURES_H
#define __ASM_SH_CPU_FEATURES_H
#define CPU_HAS_FPU 0x0001
#define CPU_HAS_P2_FLUSH_BUG 0x0002
#define CPU_HAS_MMU_PAGE_ASSOC 0x0004
#define CPU_HAS_DSP 0x0008
#define CPU_HAS_PERF_COUNTER 0x0010
#define CPU_HAS_PTEA 0x0020
#define CPU_HAS_LLSC 0x0040
#define CPU_HAS_L2_CACHE 0x0080
#define CPU_HAS_OP32 0x0100
#endif

View File

@ -0,0 +1,17 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __SH_CPUTIME_H
#define __SH_CPUTIME_H
#include <asm-generic/cputime.h>
#endif

View File

@ -0,0 +1,20 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_CURRENT_H
#define __ASM_SH_CURRENT_H
#include <linux/thread_info.h>
struct task_struct;
#define current get_current()
#endif

View File

@ -0,0 +1,19 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_DELAY_H
#define __ASM_SH_DELAY_H
#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : __udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : __ndelay(n))
#endif
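
udelay()/ndelay() above use a common kernel trick: if the argument is a compile-time constant that is unreasonably large, the macro expands to a call to a deliberately undefined function (__bad_udelay / __bad_ndelay), turning the mistake into a link error; a stripped-down sketch of the same idiom (all names below are made up):

extern void __delay_too_long(void);          /* intentionally never defined */
extern void spin_for_usec(unsigned long n);  /* hypothetical runtime path */

#define SAFE_UDELAY(n)                                   \
    ((__builtin_constant_p(n) && (n) > 20000)            \
        ? __delay_too_long()                             \
        : spin_for_usec(n))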

View File

@ -0,0 +1,15 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#include <asm-generic/device.h>
struct platform_device;

View File

@ -0,0 +1,12 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#include <asm-generic/div64.h>

View File

@ -0,0 +1,31 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
#define dma_supported(dev, mask) (1)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
#endif

View File

@ -0,0 +1,14 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
#endif

View File

@ -0,0 +1,26 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef _DMABRG_H_
#define _DMABRG_H_
#define DMABRGIRQ_USBDMA 0
#define DMABRGIRQ_USBDMAERR 1
#define DMABRGIRQ_A0TXF 2
#define DMABRGIRQ_A0TXH 3
#define DMABRGIRQ_A0RXF 4
#define DMABRGIRQ_A0RXH 5
#define DMABRGIRQ_A1TXF 6
#define DMABRGIRQ_A1TXH 7
#define DMABRGIRQ_A1RXF 8
#define DMABRGIRQ_A1RXH 9
#endif

View File

@ -0,0 +1,17 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_EDOSK7705_IO_H
#define __ASM_SH_EDOSK7705_IO_H
#include <asm/io_generic.h>
#endif

View File

@ -0,0 +1,92 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ELF_H
#define __ASM_SH_ELF_H
#include <linux/utsname.h>
#include <asm/auxvec.h>
#include <asm/ptrace.h>
#include <asm/user.h>
#define EF_SH_PIC 0x100
#define EF_SH_FDPIC 0x8000
#define R_SH_NONE 0
#define R_SH_DIR32 1
#define R_SH_REL32 2
#define R_SH_DIR8WPN 3
#define R_SH_IND12W 4
#define R_SH_DIR8WPL 5
#define R_SH_DIR8WPZ 6
#define R_SH_DIR8BP 7
#define R_SH_DIR8W 8
#define R_SH_DIR8L 9
#define R_SH_SWITCH16 25
#define R_SH_SWITCH32 26
#define R_SH_USES 27
#define R_SH_COUNT 28
#define R_SH_ALIGN 29
#define R_SH_CODE 30
#define R_SH_DATA 31
#define R_SH_LABEL 32
#define R_SH_SWITCH8 33
#define R_SH_GNU_VTINHERIT 34
#define R_SH_GNU_VTENTRY 35
#define R_SH_TLS_GD_32 144
#define R_SH_TLS_LD_32 145
#define R_SH_TLS_LDO_32 146
#define R_SH_TLS_IE_32 147
#define R_SH_TLS_LE_32 148
#define R_SH_TLS_DTPMOD32 149
#define R_SH_TLS_DTPOFF32 150
#define R_SH_TLS_TPOFF32 151
#define R_SH_GOT32 160
#define R_SH_PLT32 161
#define R_SH_COPY 162
#define R_SH_GLOB_DAT 163
#define R_SH_JMP_SLOT 164
#define R_SH_RELATIVE 165
#define R_SH_GOTOFF 166
#define R_SH_GOTPC 167
#define R_SH_GOT20 70
#define R_SH_GOTOFF20 71
#define R_SH_GOTFUNCDESC 72
#define R_SH_GOTFUNCDESC20 73
#define R_SH_GOTOFFFUNCDESC 74
#define R_SH_GOTOFFFUNCDESC20 75
#define R_SH_FUNCDESC 76
#define R_SH_FUNCDESC_VALUE 77
#define R_SH_IMM_LOW16 246
#define R_SH_IMM_LOW16_PCREL 247
#define R_SH_IMM_MEDLOW16 248
#define R_SH_IMM_MEDLOW16_PCREL 249
#define R_SH_NUM 256
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_CLASS ELFCLASS32
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_ARCH EM_SH
#endif
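
ELF_CLASS, ELF_DATA, and ELF_ARCH above describe what a valid SH executable's identification looks like; a minimal check of a file header against them, using the standard <elf.h> names (illustrative only):

#include <elf.h>
#include <string.h>

static int looks_like_sh_elf(const Elf32_Ehdr *eh)
{
    return memcmp(eh->e_ident, ELFMAG, SELFMAG) == 0
        && eh->e_ident[EI_CLASS] == ELFCLASS32       /* ELF_CLASS */
        && eh->e_ident[EI_DATA]  == ELFDATA2LSB      /* ELF_DATA on a little-endian build */
        && eh->e_machine         == EM_SH;           /* ELF_ARCH */
}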

View File

@ -0,0 +1,17 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif

View File

@ -0,0 +1,17 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_SH_ERRNO_H
#define __ASM_SH_ERRNO_H
#include <asm-generic/errno.h>
#endif

Some files were not shown because too many files have changed in this diff.