Merge "libc: Update auto-gen scripts"
commit 62e1f374d1
committed by Android (Google) Code Review
@@ -219,7 +219,7 @@ int sendmsg:socketcall:16(int, const struct msghdr *, unsigned int) -
 int recvmsg:socketcall:17(int, struct msghdr *, unsigned int) -1,102,-1

 # sockets for sh.
-int __socketcall:__socketcall(int, unsigned long*) -1,-1,102
+int __socketcall:socketcall(int, unsigned long*) -1,-1,102

 # scheduler & real-time
 int sched_setscheduler(pid_t pid, int policy, const struct sched_param *param) 156
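A note on the SYSCALLS.TXT syntax touched above: each entry names a bionic stub, the kernel syscall it maps to (optionally with a socketcall sub-call id, e.g. socketcall:17), and three comma-separated syscall numbers which, judging from the check_syscalls() changes later in this commit, are the arm, x86 and superh numbers in that order, with -1 meaning the syscall is not available on that architecture. A minimal, unofficial parsing sketch of the trailing number list (not the project's actual parser):

    # Sketch only: pull the per-arch syscall numbers out of one entry such as
    #   "int __socketcall:socketcall(int, unsigned long*) -1,-1,102"
    def syscall_numbers(line):
        decl, _, numbers = line.rpartition(")")
        arm_id, x86_id, superh_id = [int(n) for n in numbers.split(",")]
        return arm_id, x86_id, superh_id

    print(syscall_numbers("int __socketcall:socketcall(int, unsigned long*) -1,-1,102"))
    # -> (-1, -1, 102): no direct syscall on arm or x86, syscall 102 on superh
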
@@ -14,7 +14,7 @@ __socketcall:

    /* check return value */
    cmp/pz r0
-   bt __NR___socketcall_end
+   bt __NR_socketcall_end

    /* keep error number */
    sts.l pr, @-r15
@@ -23,10 +23,10 @@ __socketcall:
    mov r0, r4
    lds.l @r15+, pr

-__NR___socketcall_end:
+__NR_socketcall_end:
    rts
    nop

    .align 2
-0: .long __NR___socketcall
+0: .long __NR_socketcall
 1: .long __set_syscall_errno
@@ -280,7 +280,7 @@
 #define __NR_socketcall (__NR_SYSCALL_BASE + 102)
 #define __NR_socketcall (__NR_SYSCALL_BASE + 102)
 #define __NR_socketcall (__NR_SYSCALL_BASE + 102)
-#define __NR___socketcall (__NR_SYSCALL_BASE + 102)
+#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
 #define __NR_getcpu (__NR_SYSCALL_BASE + 318)
 #define __NR_ioprio_set (__NR_SYSCALL_BASE + 288)
 #define __NR_ioprio_get (__NR_SYSCALL_BASE + 289)
@@ -7,7 +7,7 @@ from utils import *

 noUpdate = 1

-def cleanupFile( path ):
+def cleanupFile( path, original_path=kernel_original_path ):
     """reads an original header and perform the cleanup operation on it
        this functions returns the destination path and the clean header
        as a single string"""
@@ -26,7 +26,6 @@ def cleanupFile( path ):
         sys.stderr.write( "warning: not a file: %s\n" % path )
         return None, None

-    original_path = kernel_original_path
     if os.path.commonprefix( [ src_path, original_path ] ) != original_path:
         if noUpdate:
             panic( "file is not in 'original' directory: %s\n" % path );
@@ -54,27 +53,27 @@ def cleanupFile( path ):
     else:
         dst_path = "common/" + src_path

-    dst_path = os.path.normpath( original_path + "/../" + dst_path )
+    dst_path = os.path.normpath( kernel_cleaned_path + "/" + dst_path )

     # now, let's parse the file
     #
-    list = cpp.BlockParser().parseFile(path)
-    if not list:
+    blocks = cpp.BlockParser().parseFile(path)
+    if not blocks:
         sys.stderr.write( "error: can't parse '%s'" % path )
         sys.exit(1)


-    list.optimizeMacros( kernel_known_macros )
-    list.optimizeIf01()
-    list.removeVarsAndFuncs( statics )
-    list.removeComments()
-    list.removeEmptyLines()
-    list.removeMacroDefines( kernel_ignored_macros )
-    list.insertDisclaimer( kernel.kernel_disclaimer )
-    list.replaceTokens( kernel_token_replacements )
+    blocks.optimizeMacros( kernel_known_macros )
+    blocks.optimizeIf01()
+    blocks.removeVarsAndFuncs( statics )
+    blocks.replaceTokens( kernel_token_replacements )
+    blocks.removeComments()
+    blocks.removeMacroDefines( kernel_ignored_macros )
+    blocks.removeWhiteSpace()

     out = StringOutput()
-    list.write(out)
+    out.write( kernel_disclaimer )
+    blocks.writeWithWarning(out, kernel_warning, 4)
     return dst_path, out.get()


@@ -92,12 +91,15 @@ if __name__ == "__main__":
        if the content has changed. with this, you can pass more
        than one file on the command-line

+        -k<path> specify path of original kernel headers
+        -d<path> specify path of cleaned kernel headers
+
        <header_path> must be in a subdirectory of 'original'
     """ % os.path.basename(sys.argv[0])
     sys.exit(1)

     try:
-        optlist, args = getopt.getopt( sys.argv[1:], 'uvk:' )
+        optlist, args = getopt.getopt( sys.argv[1:], 'uvk:d:' )
     except:
         # unrecognized option
         sys.stderr.write( "error: unrecognized option\n" )
@@ -111,6 +113,8 @@ if __name__ == "__main__":
             D_setlevel(1)
         elif opt == '-k':
             kernel_original_path = arg
+        elif opt == '-d':
+            kernel_cleaned_path = arg

     if len(args) == 0:
         usage()
@@ -143,9 +147,6 @@ if __name__ == "__main__":
         print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )


-    if os.environ.has_key("ANDROID_PRODUCT_OUT"):
-        b.updateP4Files()
-    else:
-        b.updateFiles()
+    b.updateGitFiles()

     sys.exit(0)
@@ -1529,7 +1529,7 @@ def test_CppExpr():

 class Block:
     """a class used to model a block of input source text. there are two block types:
-        - direcive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
+        - directive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
         - text blocks, contain the tokens of non-directive blocks

        the cpp parser class below will transform an input source file into a list of Block
@@ -1609,6 +1609,91 @@ class Block:
         else:
             return None

+    def removeWhiteSpace(self):
+        # Remove trailing whitespace and empty lines
+        # All whitespace is also contracted to a single space
+        if self.directive != None:
+            return
+
+        tokens = []
+        line = 0   # index of line start
+        space = -1  # index of first space, or -1
+        ii = 0
+        nn = len(self.tokens)
+        while ii < nn:
+            tok = self.tokens[ii]
+
+            # If we find a space, record its position if this is the first
+            # one the line start or the previous character. Don't append
+            # anything to tokens array yet though.
+            if tok.id == tokSPACE:
+                if space < 0:
+                    space = ii
+                ii += 1
+                continue
+
+            # If this is a line space, ignore the spaces we found previously
+            # on the line, and remove empty lines.
+            if tok.id == tokLN:
+                old_line = line
+                old_space = space
+                #print "N line=%d space=%d ii=%d" % (line, space, ii)
+                ii += 1
+                line = ii
+                space = -1
+                if old_space == old_line: # line only contains spaces
+                    #print "-s"
+                    continue
+                if ii-1 == old_line: # line is empty
+                    #print "-e"
+                    continue
+                tokens.append(tok)
+                continue
+
+            # Other token, append any space range if any, converting each
+            # one to a single space character, then append the token.
+            if space >= 0:
+                jj = space
+                space = -1
+                while jj < ii:
+                    tok2 = self.tokens[jj]
+                    tok2.value = " "
+                    tokens.append(tok2)
+                    jj += 1
+
+            tokens.append(tok)
+            ii += 1
+
+        self.tokens = tokens
+
+    def writeWithWarning(self,out,warning,left_count,repeat_count):
+        # removeWhiteSpace() will sometimes creates non-directive blocks
+        # without any tokens. These come from blocks that only contained
+        # empty lines and spaces. They should not be printed in the final
+        # output, and then should not be counted for this operation.
+        #
+        if not self.directive and self.tokens == []:
+            return left_count
+
+        if self.directive:
+            out.write(str(self) + "\n")
+            left_count -= 1
+            if left_count == 0:
+                out.write(warning)
+                left_count = repeat_count
+
+        else:
+            for tok in self.tokens:
+                out.write(str(tok))
+                if tok.id == tokLN:
+                    left_count -= 1
+                    if left_count == 0:
+                        out.write(warning)
+                        left_count = repeat_count
+
+        return left_count
+
+
     def __repr__(self):
         """generate the representation of a given block"""
         if self.directive:
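The writeWithWarning() method added above threads a countdown through the blocks: the caller passes left_count (lines remaining before the next warning) and repeat_count (the period), and the method returns the updated countdown. A self-contained sketch of that contract over plain strings, leaving out the Token and Block machinery:

    # Sketch of the every-N-lines warning interleave; not the real classes.
    def write_with_warning(lines, emit, warning, repeat_count):
        left_count = repeat_count
        for line in lines:
            emit(line)
            left_count -= 1
            if left_count == 0:
                emit(warning)
                left_count = repeat_count

    out = []
    write_with_warning(["line %d" % n for n in range(1, 10)], out.append,
                       "/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE */", 4)
    print("\n".join(out))  # the warning line appears after every 4th line
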
@@ -1651,7 +1736,6 @@ class Block:

         return result

-
 class BlockList:
     """a convenience class used to hold and process a list of blocks returned by
        the cpp parser"""
@@ -1694,6 +1778,10 @@ class BlockList:
             if b.isIf():
                 b.expr.removePrefixed(prefix,names)

+    def removeWhiteSpace(self):
+        for b in self.blocks:
+            b.removeWhiteSpace()
+
     def optimizeAll(self,macros):
         self.optimizeMacros(macros)
         self.optimizeIf01()
@@ -1713,72 +1801,17 @@ class BlockList:
     def write(self,out):
         out.write(str(self))

+    def writeWithWarning(self,out,warning,repeat_count):
+        left_count = repeat_count
+        for b in self.blocks:
+            left_count = b.writeWithWarning(out,warning,left_count,repeat_count)
+
     def removeComments(self):
         for b in self.blocks:
             for tok in b.tokens:
                 if tok.id == tokSPACE:
                     tok.value = " "

-    def removeEmptyLines(self):
-        # state = 1 => previous line was tokLN
-        # state = 0 => previous line was directive
-        state = 1
-        for b in self.blocks:
-            if b.isDirective():
-                #print "$$$ directive %s" % str(b)
-                state = 0
-            else:
-                # a tokLN followed by spaces is replaced by a single tokLN
-                # several successive tokLN are replaced by a single one
-                #
-                dst = []
-                src = b.tokens
-                n = len(src)
-                i = 0
-                #print "$$$ parsing %s" % repr(src)
-                while i < n:
-                    # find final tokLN
-                    j = i
-                    while j < n and src[j].id != tokLN:
-                        j += 1
-
-                    if j >= n:
-                        # uhhh
-                        dst += src[i:]
-                        break
-
-                    if src[i].id == tokSPACE:
-                        k = i+1
-                        while src[k].id == tokSPACE:
-                            k += 1
-
-                        if k == j: # empty lines with spaces in it
-                            i = j # remove the spaces
-
-                    if i == j:
-                        # an empty line
-                        if state == 1:
-                            i += 1 # remove it
-                        else:
-                            state = 1
-                            dst.append(src[i])
-                            i += 1
-                    else:
-                        # this line is not empty, remove trailing spaces
-                        k = j
-                        while k > i and src[k-1].id == tokSPACE:
-                            k -= 1
-
-                        nn = i
-                        while nn < k:
-                            dst.append(src[nn])
-                            nn += 1
-                        dst.append(src[j])
-                        state = 0
-                        i = j+1
-
-                b.tokens = dst
-
     def removeVarsAndFuncs(self,knownStatics=set()):
         """remove all extern and static declarations corresponding
            to variable and function declarations. we only accept typedefs
@@ -1789,66 +1822,118 @@ class BlockList:
            which is useful for optimized byteorder swap functions and
            stuff like that.
        """
-        # state = 1 => typedef/struct encountered
-        # state = 2 => vars or func declaration encountered, skipping until ";"
        # state = 0 => normal (i.e. LN + spaces)
+        # state = 1 => typedef/struct encountered, ends with ";"
+        # state = 2 => var declaration encountered, ends with ";"
+        # state = 3 => func declaration encountered, ends with "}"
        state = 0
        depth = 0
        blocks2 = []
+        skipTokens = False
        for b in self.blocks:
            if b.isDirective():
                blocks2.append(b)
            else:
                n = len(b.tokens)
                i = 0
-                first = 0
-                if state == 2:
+                if skipTokens:
                    first = n
+                else:
+                    first = 0
                while i < n:
                    tok = b.tokens[i]
-                    if state == 0:
-                        bad = 0
-                        if tok.id in [tokLN, tokSPACE]:
-                            pass
-                        elif tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
-                            state = 1
-                        else:
-                            if tok.value in [ 'static', 'extern', '__KINLINE' ]:
-                                j = i+1
-                                ident = ""
-                                while j < n and not (b.tokens[j].id in [ '(', ';' ]):
-                                    if b.tokens[j].id == tokIDENT:
-                                        ident = b.tokens[j].value
-                                    j += 1
-                                if j < n and ident in knownStatics:
-                                    # this is a known static, we're going to keep its
-                                    # definition in the final output
-                                    state = 1
-                                else:
-                                    #print "### skip static '%s'" % ident
-                                    pass
-
-                            if state == 0:
-                                if i > first:
-                                    #print "### intermediate from '%s': '%s'" % (tok.value, repr(b.tokens[first:i]))
-                                    blocks2.append( Block(b.tokens[first:i]) )
-                                state = 2
-                                first = n
-
-                    else: # state > 0
-                        if tok.id == '{':
+                    tokid = tok.id
+                    # If we are not looking for the start of a new
+                    # type/var/func, then skip over tokens until
+                    # we find our terminator, managing the depth of
+                    # accolades as we go.
+                    if state > 0:
+                        terminator = False
+                        if tokid == '{':
                            depth += 1
-                        elif tok.id == '}':
+                        elif tokid == '}':
                            if depth > 0:
                                depth -= 1
-                        elif depth == 0 and tok.id == ';':
-                            if state == 2:
-                                first = i+1
+                            if (depth == 0) and (state == 3):
+                                terminator = True
+                        elif tokid == ';' and depth == 0:
+                            terminator = True
+
+                        if terminator:
+                            # we found the terminator
                            state = 0
+                            if skipTokens:
+                                skipTokens = False
+                                first = i+1

-                    i += 1
+                        i = i+1
+                        continue
+
+                    # We are looking for the start of a new type/func/var
+                    # ignore whitespace
+                    if tokid in [tokLN, tokSPACE]:
+                        i = i+1
+                        continue
+
+                    # Is it a new type definition, then start recording it
+                    if tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
+                        #print "$$$ keep type declr" + repr(b.tokens[i:])
+                        state = 1
+                        i = i+1
+                        continue
+
+                    # Is it a variable or function definition. If so, first
+                    # try to determine which type it is, and also extract
+                    # its name.
+                    #
+                    # We're going to parse the next tokens of the same block
+                    # until we find a semi-column or a left parenthesis.
+                    #
+                    # The semi-column corresponds to a variable definition,
+                    # the left-parenthesis to a function definition.
+                    #
+                    # We also assume that the var/func name is the last
+                    # identifier before the terminator.
+                    #
+                    j = i+1
+                    ident = ""
+                    while j < n:
+                        tokid = b.tokens[j].id
+                        if tokid == '(': # a function declaration
+                            state = 3
+                            break
+                        elif tokid == ';': # a variable declaration
+                            state = 2
+                            break
+                        if tokid == tokIDENT:
+                            ident = b.tokens[j].value
+                        j += 1
+
+                    if j >= n:
+                        # This can only happen when the declaration
+                        # does not end on the current block (e.g. with
+                        # a directive mixed inside it.
+                        #
+                        # We will treat it as malformed because
+                        # it's very hard to recover from this case
+                        # without making our parser much more
+                        # complex.
+                        #
+                        #print "### skip unterminated static '%s'" % ident
+                        break
+
+                    if ident in knownStatics:
+                        #print "### keep var/func '%s': %s" % (ident,repr(b.tokens[i:j]))
+                        pass
+                    else:
+                        # We're going to skip the tokens for this declaration
+                        #print "### skip variable /func'%s': %s" % (ident,repr(b.tokens[i:j]))
+                        if i > first:
+                            blocks2.append( Block(b.tokens[first:i]))
+                        skipTokens = True
+                        first = n
+
+                    i = i+1

                if i > first:
                    #print "### final '%s'" % repr(b.tokens[first:i])
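The rewritten removeVarsAndFuncs() above boils down to a terminator scan: once a declaration is recognized, tokens are skipped until ';' at brace depth zero (types and variables) or the '}' that closes the body (functions). A stripped-down sketch of just that scan, using single characters instead of the parser's Token objects:

    # Simplified sketch: return the index just past a declaration's terminator.
    def skip_declaration(tokens, start, is_function):
        depth = 0
        i = start
        while i < len(tokens):
            tok = tokens[i]
            if tok == '{':
                depth += 1
            elif tok == '}':
                if depth > 0:
                    depth -= 1
                if depth == 0 and is_function:
                    return i + 1
            elif tok == ';' and depth == 0:
                return i + 1
            i += 1
        return i  # unterminated declaration: give up at end of input

    toks = list("static int f(void){if(x){y;}return 0;}int g;")
    end = skip_declaration(toks, toks.index('{'), True)
    print("".join(toks[end:]))  # -> "int g;"
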
@@ -16,7 +16,11 @@ kernel_dirs = [ "linux", "asm", "asm-generic", "mtd" ]

 # path to the directory containing the original kernel headers
 #
-kernel_original_path = os.path.normpath( find_program_dir() + '/../original' )
+kernel_original_path = os.path.normpath( find_program_dir() + '/../../../../external/kernel-headers/original' )
+
+# path to the default location of the cleaned-up headers
+#
+kernel_cleaned_path = os.path.normpath( find_program_dir() + '/..' )

 # a special value that is used to indicate that a given macro is known to be
 # undefined during optimization
@@ -112,6 +116,18 @@ kernel_disclaimer = """\
 *** structures, and macros generated from the original header, and thus,
 *** contains no copyrightable information.
 ***
+*** To edit the content of this header, modify the corresponding
+*** source file (e.g. under external/kernel-headers/original/) then
+*** run bionic/libc/kernel/tools/update_all.py
+***
+*** Any manual change here will be lost the next time this script will
+*** be run. You've been warned!
+***
 ****************************************************************************
 ****************************************************************************/
 """
+
+# This is the warning line that will be inserted every N-th line in the output
+kernel_warning = """\
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+"""
@@ -3,7 +3,7 @@
 # this program is used to find source code that includes linux kernel headers directly
 # (e.g. with #include <linux/...> or #include <asm/...>)
 #
-# then it lists
+# then it lists them on the standard output.

 import sys, cpp, glob, os, re, getopt, kernel
 from utils import *
@@ -12,20 +12,14 @@ from defaults import *
 program_dir = find_program_dir()

 wanted_archs = kernel_archs
-wanted_include = os.path.normpath(program_dir + '/../original')
-wanted_config = os.path.normpath(program_dir + '/../original/config')
+wanted_config = None

 def usage():
     print """\
-    usage: find_headers.py [options] (file|directory|@listfile)+
+    usage: find_headers.py [options] <kernel-root> (file|directory|@listfile)+

     options:
-        -d <include-dir>   specify alternate kernel headers
-            'include' directory
-            ('%s' by default)
-
-        -c <file>          specify alternate .config file
-            ('%s' by default)
+        -c <file>          specify .config file (none by default)

         -a <archs>         used to specify an alternative list
             of architectures to support
@@ -37,12 +31,12 @@ def usage():
     by a set of source files or directories containing them. the search
     is recursive to find *all* required files.

-""" % ( wanted_include, wanted_config, string.join(kernel_archs,",") )
+""" % ( string.join(kernel_archs,",") )
     sys.exit(1)


 try:
-    optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:' )
+    optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:k:' )
 except:
     # unrecognized option
     print "error: unrecognized option"
@@ -51,8 +45,6 @@ except:
 for opt, arg in optlist:
     if opt == '-a':
         wanted_archs = string.split(arg,',')
-    elif opt == '-d':
-        wanted_include = arg
     elif opt == '-c':
         wanted_config = arg
     elif opt == '-v':
@@ -62,10 +54,10 @@ for opt, arg in optlist:
     else:
         usage()

-if len(args) < 1:
+if len(args) < 2:
     usage()

-kernel_root = wanted_include
+kernel_root = args[0]
 if not os.path.exists(kernel_root):
     sys.stderr.write( "error: directory '%s' does not exist\n" % kernel_root )
     sys.exit(1)
@@ -74,26 +66,26 @@ if not os.path.isdir(kernel_root):
     sys.stderr.write( "error: '%s' is not a directory\n" % kernel_root )
     sys.exit(1)

-if not os.path.isdir(kernel_root+"/linux"):
-    sys.stderr.write( "error: '%s' does not have a 'linux' directory\n" % kernel_root )
+if not os.path.isdir(kernel_root+"/include/linux"):
+    sys.stderr.write( "error: '%s' does not have an 'include/linux' directory\n" % kernel_root )
     sys.exit(1)

-if not os.path.exists(wanted_config):
-    sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
-    sys.exit(1)
+if wanted_config:
+    if not os.path.exists(wanted_config):
+        sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
+        sys.exit(1)

-if not os.path.isfile(wanted_config):
-    sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
-    sys.exit(1)
+    if not os.path.isfile(wanted_config):
+        sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
+        sys.exit(1)

 # find all architectures in the kernel tree
-re_asm_ = re.compile(r"asm-(\w+)")
 archs = []
-for dir in os.listdir(kernel_root):
-    m = re_asm_.match(dir)
-    if m:
-        if verbose: print ">> found kernel arch '%s'" % m.group(1)
-        archs.append(m.group(1))
+for archdir in os.listdir(kernel_root+"/arch"):
+    if os.path.exists("%s/arch/%s/include/asm" % (kernel_root, archdir)):
+        if verbose:
+            print "Found arch '%s'" % archdir
+        archs.append(archdir)

 # if we're using the 'kernel_headers' directory, there is only asm/
 # and no other asm-<arch> directories (arm is assumed, which sucks)
@@ -126,6 +118,7 @@ if wanted_archs != None:

 # helper function used to walk the user files
 def parse_file(path, parser):
+    #print "parse %s" % path
     parser.parseFile(path)


@@ -136,6 +129,7 @@ def parse_file(path, parser):
 # try to read the config file
 try:
     cparser = kernel.ConfigParser()
-    cparser.parseFile( wanted_config )
+    if wanted_config:
+        cparser.parseFile( wanted_config )
 except:
     sys.stderr.write( "error: can't parse '%s'" % wanted_config )
@@ -145,7 +139,8 @@ kernel_config = cparser.getDefinitions()

 # first, obtain the list of kernel files used by our clients
 fparser = kernel.HeaderScanner()
-walk_source_files( args, parse_file, fparser, excludes=["kernel_headers"] )
+dir_excludes=[".repo","external/kernel-headers","ndk","out","prebuilt","bionic/libc/kernel","development/ndk","external/qemu/distrib"]
+walk_source_files( args[1:], parse_file, fparser, excludes=["./"+f for f in dir_excludes] )
 headers = fparser.getHeaders()
 files = fparser.getFiles()

@@ -170,6 +165,6 @@ if 0: # just for debugging
     sys.exit(0)

 for h in sorted(headers):
-    print h
+    print "%s" % h

 sys.exit(0)
@@ -55,8 +55,11 @@ class HeaderScanner:
     #   <asm-generic/*>
     #   <mtd/*>
     #
-    re_combined =\
-       re.compile(r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|") )
+    re_combined_str=\
+       r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|")
+
+    re_combined = re.compile(re_combined_str)
+
     # some kernel files choose to include files with relative paths (x86 32/64
     # dispatch for instance)
     re_rel_dir = re.compile(r'^.*"([\d\w_\+\.\-/]+)".*$')
@@ -6,7 +6,7 @@ from utils import *

 def usage():
     print """\
-  usage: %(progname)s
+  usage: %(progname)s [kernel-original-path]

   this program is used to update all the auto-generated clean headers
   used by the Bionic C library. it assumes the following:
@@ -31,13 +31,19 @@ except:
     sys.stderr.write( "error: unrecognized option\n" )
     usage()

-if len(optlist) > 0 or len(args) > 0:
+if len(optlist) > 0 or len(args) > 1:
     usage()

 progdir = find_program_dir()
-original_dir = os.path.normpath( progdir + "/../original" )
-if not os.path.isdir( original_dir ):
-    panic( "required directory does not exists: %s\n" % original_dir )
+
+if len(args) == 1:
+    original_dir = arg[0]
+    if not os.path.isdir(original_dir):
+        panic( "Not a directory: %s" % original_dir )
+else:
+    original_dir = kernel_original_path
+    if not os.path.isdir(original_dir):
+        panic( "Missing directory, please specify one through command-line: %s" % original_dir )

 # find all source files in 'original'
 #
@@ -57,29 +63,36 @@ b.readDir( os.path.normpath( progdir + "/../common" ) )

 #print "OLD " + repr(b.old_files)

+oldlen = 120
 for path in sources:
-    dst_path, newdata = clean_header.cleanupFile(path)
+    dst_path, newdata = clean_header.cleanupFile(path, original_dir)
     if not dst_path:
         continue

     b.readFile( dst_path )
     r = b.editFile( dst_path, newdata )
     if r == 0:
-        r = "unchanged"
+        state = "unchanged"
     elif r == 1:
-        r = "edited"
+        state = "edited"
     else:
-        r = "added"
+        state = "added"

-    print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )
+    str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
+    if sys.stdout.isatty():
+        print "%-*s" % (oldlen,str),
+        if (r == 0):
+            print "\r",
+        else:
+            print "\n",
+            oldlen = 0
+    else:
+        print str

-# We don't use Perforce anymore, but just in case, define ANDROID_USE_P4
-# in your environment if you think you need it.
-usePerforce = os.environ.has_key("ANDROID_USE_P4")
+    oldlen = len(str)

-if usePerforce:
-    b.updateP4Files()
-else:
-    b.updateFiles()
+print "%-*s" % (oldlen,"Done!")
+
+b.updateGitFiles()

 sys.exit(0)
@@ -231,6 +231,15 @@ def create_file_path(path):
 def walk_source_files(paths,callback,args,excludes=[]):
     """recursively walk a list of paths and files, only keeping the source files in directories"""
     for path in paths:
+        if len(path) > 0 and path[0] == '@':
+            # this is the name of another file, include it and parse it
+            path = path[1:]
+            if os.path.exists(path):
+                for line in open(path):
+                    if len(line) > 0 and line[-1] == '\n':
+                        line = line[:-1]
+                    walk_source_files([line],callback,args,excludes)
+            continue
         if not os.path.isdir(path):
             callback(path,args)
         else:
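The '@listfile' support added to walk_source_files() simply treats an argument that starts with '@' as a file containing more paths, one per line, and recurses on each of them. The expansion step on its own, as a small sketch without the os.walk() part:

    import os

    # Sketch of the '@listfile' expansion only (hypothetical helper, not in utils.py).
    def expand_paths(paths):
        result = []
        for path in paths:
            if path.startswith('@'):
                listfile = path[1:]
                if os.path.exists(listfile):
                    with open(listfile) as f:
                        result.extend(expand_paths([line.rstrip('\n') for line in f]))
            else:
                result.append(path)
        return result

    # expand_paths(["src/main.c", "@files.txt"]) splices in the paths listed
    # one per line inside files.txt (file names here are made up).
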
@@ -238,7 +247,7 @@ def walk_source_files(paths,callback,args,excludes=[]):
             #print "w-- %s (ex: %s)" % (repr((root,dirs)), repr(excludes))
             if len(excludes):
                 for d in dirs[:]:
-                    if d in excludes:
+                    if os.path.join(root,d) in excludes:
                         dirs.remove(d)
             for f in files:
                 r, ext = os.path.splitext(f)
@@ -395,3 +404,19 @@ class BatchFileUpdater:
             D2("P4 DELETES: %s" % files)
             o = commands.getoutput( "p4 delete " + files )
             D2( o )
+
+    def updateGitFiles(self):
+        adds, deletes, edits = self.getChanges()
+
+        if adds:
+            for dst in sorted(adds):
+                self._writeFile(dst)
+            commands.getoutput("git add " + " ".join(adds))
+
+        if deletes:
+            commands.getoutput("git rm " + " ".join(deletes))
+
+        if edits:
+            for dst in sorted(edits):
+                self._writeFile(dst)
+            commands.getoutput("git add " + " ".join(edits))
@@ -105,8 +105,29 @@ def find_bionic_root():
     else:
         return None

+def find_original_kernel_headers():
+    """try to find the directory containing the original kernel headers"""
+    bionic_root = find_bionic_root()
+    if not bionic_root:
+        D("Could not find Bionic root !!")
+        return None
+
+    path = os.path.normpath(bionic_root + "/../../external/kernel-headers/original")
+    if not os.path.isdir(path):
+        D("Could not find %s" % (path))
+        return None
+
+    return path
+
 def find_kernel_headers():
     """try to find the directory containing the kernel headers for this machine"""
+
+    # First try to find the original kernel headers.
+    ret = find_original_kernel_headers()
+    if ret:
+        D("found original kernel headers in: %s" % (ret))
+        return ret
+
     status, version = commands.getstatusoutput( "uname -r" ) # get Linux kernel version
     if status != 0:
         D("could not execute 'uname -r' command properly")
@@ -116,14 +137,39 @@ def find_kernel_headers():
     if len(version) > 5 and version[-5:] == "-xenU":
         version = version[:-5]

-    path = "/usr/src/linux-headers-" + version
-    D("probing %s for kernel headers" % (path+"/include"))
+    path = "/usr/src/linux-headers-" + version + "/include"
+    D("probing %s for kernel headers" % (path))
     ret = os.path.isdir( path )
     if ret:
-        D("found kernel headers in: %s" % (path + "/include"))
+        D("found kernel headers in: %s" % (path))
         return path
     return None

+def find_arch_header(kernel_headers,arch,header):
+    # First, try in <root>/arch/<arm>/include/<header>
+    # corresponding to the location in the kernel source tree for
+    # certain architectures (e.g. arm).
+    path = "%s/arch/%s/include/asm/%s" % (kernel_headers, arch, header)
+    D("Probing for %s" % path)
+    if os.path.exists(path):
+        return path
+
+    # Try <root>/asm-<arch>/include/<header> corresponding to the location
+    # in the kernel source tree for other architectures (e.g. x86).
+    path = "%s/include/asm-%s/%s" % (kernel_headers, arch, header)
+    D("Probing for %s" % path)
+    if os.path.exists(path):
+        return path
+
+    # Otherwise, look under <root>/asm-<arch>/<header> corresponding
+    # the original kernel headers directory
+    path = "%s/asm-%s/%s" % (kernel_headers, arch, header)
+    D("Probing for %s" % path)
+    if os.path.exists(path):
+        return path
+
+    return None
+
 # parser for the SYSCALLS.TXT file
 #
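find_arch_header() added above is an ordered probe over the three layouts a given architecture's headers may use; the first existing path wins. The same idea, condensed into a candidate list (a sketch, equivalent in behaviour but not the committed code):

    import os

    def find_arch_header(kernel_headers, arch, header):
        # Try the kernel-source layout first, then the two include/ layouts.
        candidates = [
            "%s/arch/%s/include/asm/%s" % (kernel_headers, arch, header),
            "%s/include/asm-%s/%s" % (kernel_headers, arch, header),
            "%s/asm-%s/%s" % (kernel_headers, arch, header),
        ]
        for path in candidates:
            if os.path.exists(path):
                return path
        return None

    # e.g. find_arch_header("/path/to/kernel", "arm", "unistd.h") returns the
    # first candidate that exists, or None (the path here is hypothetical).
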
@@ -212,7 +258,12 @@ class SysCallsTxtParser:
                 E("invalid syscall number in '%s'" % line)
                 return

-        print str(syscall_id) + ':' + str(syscall_id2) + ':' + str(syscall_id3)
+        global verbose
+        if verbose >= 2:
+            if call_id < 0:
+                print "%s: %d,%d,%d" % (syscall_name, syscall_id, syscall_id2, syscall_id3)
+            else:
+                print "%s(%d): %d,%d,%d" % (syscall_name, call_id, syscall_id, syscall_id2, syscall_id3)

         t = { "id" : syscall_id,
               "id2" : syscall_id2,
@@ -40,8 +40,8 @@ def parse_command_line(args):
     if len(args) == 0:
         linux_root = find_kernel_headers()
         if linux_root == None:
-            print "could not locate this system kernel headers root directory, please"
-            print "specify one when calling this program, i.e. 'checksyscalls <headers-directory>'"
+            print "Could not locate original or system kernel headers root directory."
+            print "Please specify one when calling this program, i.e. 'checksyscalls <headers-directory>'"
             sys.exit(1)
         print "using the following kernel headers root: '%s'" % linux_root
     else:
@@ -112,62 +112,63 @@ def process_header(header_file,dict):

 arm_dict = {}
 x86_dict = {}
+superh_dict = {}

-# remove trailing slash and '/include' from the linux_root, if any
+# remove trailing slash from the linux_root, if any
 if linux_root[-1] == '/':
     linux_root = linux_root[:-1]

-if len(linux_root) > 8 and linux_root[-8:] == '/include':
-    linux_root = linux_root[:-8]
-
-arm_unistd = linux_root + "/include/asm-arm/unistd.h"
-if not os.path.exists(arm_unistd):
-    print "WEIRD: could not locate the ARM unistd.h header file"
-    print "tried searching in '%s'" % arm_unistd
-    print "maybe using a different set of kernel headers might help"
+arm_unistd = find_arch_header(linux_root, "arm", "unistd.h")
+if not arm_unistd:
+    print "WEIRD: Could not locate the ARM unistd.h kernel header file,"
+    print "maybe using a different set of kernel headers might help."
     sys.exit(1)

 # on recent kernels, asm-i386 and asm-x64_64 have been merged into asm-x86
 # with two distinct unistd_32.h and unistd_64.h definition files.
 # take care of this here
 #
-x86_unistd = linux_root + "/include/asm-i386/unistd.h"
-if not os.path.exists(x86_unistd):
-    x86_unistd1 = x86_unistd
-    x86_unistd = linux_root + "/include/asm-x86/unistd_32.h"
-    if not os.path.exists(x86_unistd):
-        print "WEIRD: could not locate the i386/x86 unistd.h header file"
-        print "tried searching in '%s' and '%s'" % (x86_unistd1, x86_unistd)
-        print "maybe using a different set of kernel headers might help"
+x86_unistd = find_arch_header(linux_root, "i386", "unistd.h")
+if not x86_unistd:
+    x86_unistd = find_arch_header(linux_root, "x86", "unistd_32.h")
+    if not x86_unistd:
+        print "WEIRD: Could not locate the i386/x86 unistd.h header file,"
+        print "maybe using a different set of kernel headers might help."
         sys.exit(1)

-process_header( linux_root+"/include/asm-arm/unistd.h", arm_dict )
+superh_unistd = find_arch_header(linux_root, "sh", "unistd_32.h")
+if not superh_unistd:
+    print "WEIRD: Could not locate the SuperH unistd.h kernel header file,"
+    print "maybe using a different set of kernel headers might help."
+    sys.exit(1)
+
+process_header( arm_unistd, arm_dict )
 process_header( x86_unistd, x86_dict )
+process_header( superh_unistd, superh_dict )

 # now perform the comparison
 errors = 0
-for sc in syscalls:
-    sc_name = sc["name"]
-    sc_id = sc["id"]
-    if sc_id >= 0:
-        if not arm_dict.has_key(sc_name):
-            print "arm syscall %s not defined !!" % sc_name
-            errors += 1
-        elif arm_dict[sc_name] != sc_id:
-            print "arm syscall %s should be %d instead of %d !!" % (sc_name, arm_dict[sc_name], sc_id)
-            errors += 1

-for sc in syscalls:
-    sc_name = sc["name"]
-    sc_id2 = sc["id2"]
-    if sc_id2 >= 0:
-        if not x86_dict.has_key(sc_name):
-            print "x86 syscall %s not defined !!" % sc_name
-            errors += 1
-        elif x86_dict[sc_name] != sc_id2:
-            print "x86 syscall %s should be %d instead of %d !!" % (sc_name, x86_dict[sc_name], sc_id2)
-            errors += 1
+def check_syscalls(archname, idname, arch_dict):
+    errors = 0
+    for sc in syscalls:
+        sc_name = sc["name"]
+        sc_id = sc[idname]
+        if sc_id >= 0:
+            if not arch_dict.has_key(sc_name):
+                print "%s syscall %s not defined, should be %d !!" % (archname, sc_name, sc_id)
+                errors += 1
+            elif not arch_dict.has_key(sc_name):
+                print "%s syscall %s is not implemented!" % (archname, sc_name)
+                errors += 1
+            elif arch_dict[sc_name] != sc_id:
+                print "%s syscall %s should be %d instead of %d !!" % (archname, sc_name, arch_dict[sc_name], sc_id)
+                errors += 1
+    return errors
+
+errors += check_syscalls("arm", "id", arm_dict)
+errors += check_syscalls("x86", "id2", x86_dict)
+errors += check_syscalls("superh", "id3", superh_dict)

 if errors == 0:
     print "congratulations, everything's fine !!"
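The consolidated check_syscalls() above runs one comparison per architecture, keyed by which id field of the parsed SYSCALLS.TXT entry applies to that architecture. A minimal standalone sketch of the same comparison over plain dictionaries, with made-up sample data:

    # Sketch of the per-arch comparison; arch_dict maps syscall names to the
    # numbers found in that architecture's unistd.h.
    def check_syscalls(archname, idname, arch_dict, syscalls):
        errors = 0
        for sc in syscalls:
            expected = sc[idname]
            if expected < 0:
                continue  # syscall not expected on this architecture
            found = arch_dict.get(sc["name"])
            if found is None:
                print("%s syscall %s not defined, should be %d" % (archname, sc["name"], expected))
                errors += 1
            elif found != expected:
                print("%s syscall %s should be %d instead of %d" % (archname, sc["name"], found, expected))
                errors += 1
        return errors

    # Made-up sample data, for illustration only:
    syscalls = [{"name": "socketcall", "id": -1, "id2": 102, "id3": 102}]
    errors = check_syscalls("x86", "id2", {"socketcall": 102}, syscalls)
    errors += check_syscalls("superh", "id3", {"socketcall": 102}, syscalls)
    print("%d error(s)" % errors)
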
@@ -557,7 +557,7 @@ class State:
         for sc in self.syscalls:
             if sc.has_key("asm-arm") and 'arm' in all_archs:
                 fname = "arch-arm/syscalls/%s.S" % sc["func"]
-                D( ">>> generating "+fname )
+                D2( ">>> generating "+fname )
                 fp = create_file( fname )
                 fp.write(sc["asm-arm"])
                 fp.close()
@@ -565,7 +565,7 @@ class State:

             if sc.has_key("asm-thumb") and 'arm' in all_archs:
                 fname = "arch-arm/syscalls/%s.S" % sc["func"]
-                D( ">>> generating "+fname )
+                D2( ">>> generating "+fname )
                 fp = create_file( fname )
                 fp.write(sc["asm-thumb"])
                 fp.close()
@@ -573,7 +573,7 @@ class State:

             if sc.has_key("asm-x86") and 'x86' in all_archs:
                 fname = "arch-x86/syscalls/%s.S" % sc["func"]
-                D( ">>> generating "+fname )
+                D2( ">>> generating "+fname )
                 fp = create_file( fname )
                 fp.write(sc["asm-x86"])
                 fp.close()
@@ -581,7 +581,7 @@ class State:

             if sc.has_key("asm-sh"):
                 fname = "arch-sh/syscalls/%s.S" % sc["func"]
-                D( ">>> generating "+fname )
+                D2( ">>> generating "+fname )
                 fp = create_file( fname )
                 fp.write(sc["asm-sh"])
                 fp.close()
@@ -626,7 +626,7 @@ class State:

         for stub in self.new_stubs + self.other_files:
             if not os.path.exists( bionic_root + stub ):
-                # new file, P4 add it
+                # new file, git add it
                 D( "new file: " + stub)
                 adds.append( bionic_root + stub )
                 shutil.copyfile( bionic_temp + stub, bionic_root + stub )
@@ -643,16 +643,21 @@ class State:


         if adds:
-            commands.getoutput("p4 add " + " ".join(adds))
+            commands.getoutput("git add " + " ".join(adds))
         if deletes:
-            commands.getoutput("p4 delete " + " ".join(deletes))
+            commands.getoutput("git rm " + " ".join(deletes))
         if edits:
-            commands.getoutput("p4 edit " +
-                " ".join((bionic_root + file) for file in edits))
             for file in edits:
                 shutil.copyfile( bionic_temp + file, bionic_root + file )
+            commands.getoutput("git add " +
+                " ".join((bionic_root + file) for file in edits))

-        D("ready to go !!")
+        commands.getoutput("git add %s%s" % (bionic_root,"SYSCALLS.TXT"))
+
+        if (not adds) and (not deletes) and (not edits):
+            D("no changes detected!")
+        else:
+            D("ready to go!!")

         D_setlevel(1)
