Fixed a ton of Python lint errors, enabled Python lint checking.
BUG=
Review URL: https://webrtc-codereview.appspot.com/1166004
git-svn-id: http://webrtc.googlecode.com/svn/trunk@3627 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent
52b57cc0d5
commit
5d37139374
29
PRESUBMIT.py
@@ -6,8 +6,6 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-import os.path
-
 def _LicenseHeader(input_api):
   """Returns the license header regexp."""
   # Accept any year number from 2011 to the current year
@@ -67,10 +65,6 @@ def _CheckNoFRIEND_TEST(input_api, output_api):
     'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and '
     'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
 
-def _IsLintWhitelisted(file_name):
-  """ Checks if a file is whitelisted for lint check."""
-  return True
-
 def _CheckApprovedFilesLintClean(input_api, output_api,
                                 source_file_filter=None):
   """Checks that all new or whitelisted .cc and .h files pass cpplint.py.
@@ -100,7 +94,7 @@ def _CheckApprovedFilesLintClean(input_api, output_api,
   files = []
   for f in input_api.AffectedSourceFiles(source_file_filter):
     # Note that moved/renamed files also count as added for svn.
-    if (f.Action() == 'A' or _IsLintWhitelisted(f.LocalPath())):
+    if (f.Action() == 'A'):
       files.append(f.AbsoluteLocalPath())
 
   for file_name in files:
@@ -121,6 +115,27 @@ def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   # TODO(kjellander): Use presubmit_canned_checks.PanProjectChecks too.
   results = []
+  results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
+      black_list=(r'^.*gviz_api\.py$',
+                  r'^.*gaeunit\.py$',
+                  r'^third_party/.*\.py$',
+                  r'^testing/.*\.py$',
+                  r'^tools/gyp/.*\.py$',
+                  r'^tools/perf_expectations/.*\.py$',
+                  r'^tools/python/.*\.py$',
+                  r'^tools/python_charts/data/.*\.py$',
+                  r'^tools/refactoring.*\.py$',
+                  # TODO(phoglund): should arguably be checked.
+                  r'^tools/valgrind-webrtc/.*\.py$',
+                  r'^tools/valgrind/.*\.py$',
+                  # TODO(phoglund): should arguably be checked.
+                  r'^webrtc/build/.*\.py$',
+                  r'^build/.*\.py$',
+                  r'^out/.*\.py$',),
+      disabled_warnings=['F0401',  # Failed to import x
+                         'E0611',  # No package y in x
+                         'W0232',  # Class has no __init__ method
+                        ]))
 results.extend(input_api.canned_checks.CheckLongLines(
     input_api, output_api))
 results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
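The heart of the change is the new RunPylint canned check above: every affected .py file is linted unless it matches one of the black_list regexps. A minimal standalone sketch of that filtering step, with made-up inputs (the real presubmit receives its file list from input_api, not a literal list):

import re

# Hypothetical sample inputs for illustration only.
BLACK_LIST = (r'^third_party/.*\.py$', r'^out/.*\.py$')
AFFECTED = ['webrtc/tools/foo.py', 'third_party/libyuv/setup.py']

def should_lint(path, black_list=BLACK_LIST):
  # A path is linted unless some blacklist regexp matches it.
  return not any(re.match(pattern, path) for pattern in black_list)

print [f for f in AFFECTED if should_lint(f)]  # ['webrtc/tools/foo.py']
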
@@ -2,15 +2,12 @@
 #
 # Copyright 2011 Google Inc. All Rights Reserved.
 
-# pylint: disable-msg=C6310
-
 """WebRTC Demo
 
 This module demonstrates the WebRTC API by implementing a simple video chat app.
 """
 
 import cgi
 import datetime
 import logging
 import os
 import random
@@ -26,13 +23,13 @@ jinja_environment = jinja2.Environment(
     loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
 
 # Lock for syncing DB operation in concurrent requests handling.
 # TODO(brave): keeping working on improving performance with thread syncing.
 # One possible method for near future is to reduce the message caching.
 LOCK = threading.RLock()
 
-def generate_random(len):
+def generate_random(length):
   word = ''
-  for i in range(len):
+  for _ in range(length):
     word += random.choice('0123456789')
   return word
 
@@ -66,18 +63,19 @@ def make_loopback_answer(message):
 def maybe_add_fake_crypto(message):
   if message.find("a=crypto") == -1:
     index = len(message)
-    crypto_line = "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:BAADBAADBAADBAADBAADBAADBAADBAADBAADBAAD\\r\\n"
+    crypto_line = ("a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:"
+                   "BAADBAADBAADBAADBAADBAADBAADBAADBAADBAAD\\r\\n")
     # reverse find for multiple find and insert operations.
     index = message.rfind("c=IN", 0, index)
     while (index != -1):
       message = message[:index] + crypto_line + message[index:]
       index = message.rfind("c=IN", 0, index)
   return message
 
 def handle_message(room, user, message):
   message_obj = json.loads(message)
   other_user = room.get_other_user(user)
-  room_key = room.key().id_or_name();
+  room_key = room.key().id_or_name()
   if message_obj['type'] == 'bye':
     # This would remove the other_user in loopback test too.
     # So check its availability before forwarding Bye message.
@@ -89,7 +87,7 @@ def handle_message(room, user, message):
     # Special case the loopback scenario.
     if other_user == user:
       message = make_loopback_answer(message)
     # Workaround Chrome bug.
     # Insert a=crypto line into offer from FireFox.
     # TODO(juberti): Remove this call.
     message = maybe_add_fake_crypto(message)
@@ -108,14 +106,14 @@ def send_saved_messages(client_id):
   messages = get_saved_messages(client_id)
   for message in messages:
     channel.send_message(client_id, message.msg)
-    logging.info('Delivered saved message to ' + client_id);
+    logging.info('Delivered saved message to ' + client_id)
     message.delete()
 
 def on_message(room, user, message):
   client_id = make_client_id(room, user)
   if room.is_connected(user):
     channel.send_message(client_id, message)
-    logging.info('Delivered message to user ' + user);
+    logging.info('Delivered message to user ' + user)
   else:
     new_message = Message(client_id = client_id, msg = message)
     new_message.put()
@@ -128,7 +126,7 @@ def make_media_constraints(hd_video):
     # Demo with WHD by setting size with 1280x720.
     constraints['mandatory']['minHeight'] = 720
     constraints['mandatory']['minWidth'] = 1280
   # Disabled for now due to weird stretching behavior on Mac.
   #else:
     # Demo with WVGA by setting Aspect Ration;
     #constraints['mandatory']['maxAspectRatio'] = 1.778
@@ -172,13 +170,13 @@ class Room(db.Model):
   user2_connected = db.BooleanProperty(default=False)
 
   def __str__(self):
-    str = '['
+    result = '['
     if self.user1:
-      str += "%s-%r" % (self.user1, self.user1_connected)
+      result += "%s-%r" % (self.user1, self.user1_connected)
     if self.user2:
-      str += ", %s-%r" % (self.user2, self.user2_connected)
-    str += ']'
-    return str
+      result += ", %s-%r" % (self.user2, self.user2_connected)
+    result += ']'
+    return result
 
   def get_occupancy(self):
     occupancy = 0
@@ -269,7 +267,8 @@ class DisconnectPage(webapp2.RequestHandler):
       logging.info('User ' + user + ' removed from room ' + room_key)
       logging.info('Room ' + room_key + ' has state ' + str(room))
       if other_user and other_user != user:
-        channel.send_message(make_client_id(room, other_user), '{"type":"bye"}')
+        channel.send_message(make_client_id(room, other_user),
+                             '{"type":"bye"}')
         logging.info('Sent BYE to ' + other_user)
     logging.warning('User ' + user + ' disconnected from room ' + room_key)
@@ -288,7 +287,6 @@ class MessagePage(webapp2.RequestHandler):
 
 class MainPage(webapp2.RequestHandler):
   """The main UI page, renders the 'index.html' template."""
 
   def get(self):
     """Renders the main page. When this page is shown, we create a new
     channel to push asynchronous updates to the client."""
@@ -353,7 +351,7 @@ class MainPage(webapp2.RequestHandler):
       self.response.out.write(template.render({ 'room_key': room_key }))
       logging.info('Room ' + room_key + ' is full')
       return
 
     room_link = base_url + '/?r=' + room_key
     room_link = append_url_arguments(self.request, room_link)
     token = create_channel(room, user, token_timeout)
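Two of the pylint findings above are worth spelling out: `generate_random(len)` shadows the built-in `len` (W0622), and `for i in range(...)` binds a loop variable that is never used (W0612). A minimal standalone sketch of both, not taken from the demo:

def broken(len):            # W0622: parameter shadows the builtin len()
  return len                # inside here, the len() function is unreachable

def fixed(length):
  word = ''
  for _ in range(length):   # '_' marks a deliberately unused loop variable
    word += '0'
  return word

print fixed(3)  # prints 000
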
@@ -14,24 +14,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 
 import webapp2
 import os
-from google.appengine.ext.webapp import template
 
 class PageHandler(webapp2.RequestHandler):
   def get(self):
     base_url = self.request.path
     if self.request.path == '/':
       self.redirect("http://webrtc.googlecode.com/svn/trunk/"
                     + "samples/js/demos/index.html"
                     , permanent=True)
     else:
       self.redirect("http://webrtc.googlecode.com/svn/trunk/"
                     + "samples/js/demos"
                     + base_url,
                     permanent=True)
 
 app = webapp2.WSGIApplication([
   (r'/*.*', PageHandler),
 ], debug=True)
@@ -77,7 +77,7 @@ def decode_frames(barcode_width, barcode_height, input_directory='.',
     (bool): True if the decoding went without errors.
   """
   jars = helper_functions.form_jars_string(path_to_zxing)
-  command_line_decoder ='com.google.zxing.client.j2se.CommandLineRunner'
+  command_line_decoder = 'com.google.zxing.client.j2se.CommandLineRunner'
   return helper_functions.perform_action_on_all_files(
       directory=input_directory, file_pattern='frame_',
       file_extension='png', start_number=1, action=_decode_barcode_in_file,
@@ -9,7 +9,6 @@
 
 import optparse
 import os
 import subprocess
 import sys
 
 import helper_functions
@@ -41,7 +40,7 @@ def generate_upca_barcodes(number_of_barcodes, barcode_width, barcode_height,
   """
   base_file_name = os.path.join(output_directory, "barcode_")
   jars = helper_functions.form_jars_string(path_to_zxing)
-  command_line_encoder ='com.google.zxing.client.j2se.CommandLineEncoder'
+  command_line_encoder = 'com.google.zxing.client.j2se.CommandLineEncoder'
   barcode_width = str(barcode_width)
   barcode_height = str(barcode_height)
 
@@ -109,7 +108,7 @@ def _convert_to_yuv_and_delete(output_directory, file_name, pattern):
   try:
     helper_functions.run_shell_command(
         command, msg=('Error during PNG to YUV conversion of %s' %
-                      file_name));
+                      file_name))
     os.remove(file_name)
   except helper_functions.HelperError, err:
     print >> sys.stderr, err
@@ -154,7 +153,7 @@ def _add_to_file_and_delete(output_file, file_name):
   input_file.close()
   try:
     os.remove(file_name)
-  except Exception as e:
+  except Exception:
     sys.stderr.write('Error in deleting file %s' % file_name)
     return False
   return True
@@ -27,7 +27,7 @@ def run_ant_build_command(path_to_ant_build_file):
     if process.returncode != 0:
       print >> sys.stderr, 'Failed to execute: %s' % ' '.join(cmd)
     return process.returncode
-  except Exception as e:
+  except Exception:
     print >> sys.stderr, 'Failed to execute: %s' % ' '.join(cmd)
     return -1
 
@@ -117,7 +117,7 @@ def _ReleaseLock(lock_file, lock_filename):
   os.remove(lock_filename)
 
 
-def run_coverity(options, args):
+def run_coverity(options):
   """Runs all the selected tests for the given build type and target."""
   # Create the lock file to prevent another instance of this script from
   # running.
@@ -155,9 +155,9 @@ def run_coverity(options, args):
 
   # Do a clean build. Remove the build output directory first.
   if sys.platform.startswith('linux'):
-    rm_path = os.path.join(options.source_dir,'out',options.target)
+    rm_path = os.path.join(options.source_dir, 'out', options.target)
   elif sys.platform == 'win32':
-    rm_path = os.path.join(options.source_dir,options.solution_dir,
+    rm_path = os.path.join(options.source_dir, options.solution_dir,
                            options.target)
   elif sys.platform == 'darwin':
     rm_path = os.path.join(options.source_dir,'xcodebuild')
@@ -169,16 +169,16 @@ def run_coverity(options, args):
   if options.dry_run:
     print 'shutil.rmtree(%s)' % repr(rm_path)
   else:
-    shutil.rmtree(rm_path,True)
+    shutil.rmtree(rm_path, True)
 
   if options.preserve_intermediate_dir:
     print 'Preserving intermediate directory.'
   else:
     if options.dry_run:
       print 'shutil.rmtree(%s)' % repr(options.coverity_intermediate_dir)
       print 'os.mkdir(%s)' % repr(options.coverity_intermediate_dir)
     else:
-      shutil.rmtree(options.coverity_intermediate_dir,True)
+      shutil.rmtree(options.coverity_intermediate_dir, True)
       os.mkdir(options.coverity_intermediate_dir)
 
   print 'Elapsed time: %ds' % (time.time() - start_time)
@@ -316,8 +316,8 @@ def main():
                            action='store_true', help=helpmsg,
                            default=False)
 
-  options, args = option_parser.parse_args()
-  return run_coverity(options, args)
+  options, _ = option_parser.parse_args()
+  return run_coverity(options)
 
 
 if '__main__' == __name__:
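Dropping the unused second result of parse_args() is this commit's recurring fix for unused-variable warnings. A standalone optparse sketch with an invented flag:

import optparse

parser = optparse.OptionParser()
parser.add_option('--target', default='Debug', help='build configuration')
# parse_args() always returns (options, args); naming the leftover
# positional arguments '_' documents that this script ignores them.
options, _ = parser.parse_args(['--target', 'Release'])
print options.target  # Release
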
@@ -23,7 +23,6 @@ import re
 import shlex
 import subprocess
 import sys
 import threading
 import time
 
 import perf.perf_utils
@@ -49,7 +48,7 @@ def main(argv):
                     help='command-line arguments for comparison tool')
   parser.add_option('--regexp',
                     help='regular expression to extract the comparison metric')
-  (options, args) = parser.parse_args(argv[1:])
+  options, _ = parser.parse_args(argv[1:])
 
   # Get the initial default capture device, to restore later.
   command = ['pacmd', 'list-sources']
@@ -80,7 +80,8 @@ class NetworkEmulator(object):
                                              'any', self._port_range)
     logging.debug('Created outgoing rule: %s', outgoing_rule_id)
 
-  def check_permissions(self):
+  @staticmethod
+  def check_permissions():
     """Checks if permissions are available to run Dummynet commands.
 
     Raises:
@@ -88,19 +89,20 @@ class NetworkEmulator(object):
           available.
     """
     if os.geteuid() != 0:
-      self._run_shell_command(
+      _run_shell_command(
           ['sudo', '-n', 'ipfw', '-h'],
           msg=('Cannot run \'ipfw\' command. This script must be run as '
                'root or have password-less sudo access to this command.'))
 
-  def cleanup(self):
+  @staticmethod
+  def cleanup():
     """Stops the network emulation by flushing all Dummynet rules.
 
     Notice that this will flush any rules that may have been created previously
     before starting the emulation.
     """
-    self._run_shell_command(['sudo', 'ipfw', '-f', 'flush'],
-                            'Failed to flush Dummynet rules!')
+    _run_shell_command(['sudo', 'ipfw', '-f', 'flush'],
+                       'Failed to flush Dummynet rules!')
 
   def _create_dummynet_rule(self, pipe_id, from_address, to_address,
                             port_range):
@@ -121,10 +123,10 @@ class NetworkEmulator(object):
     self._rule_counter += 100
     add_part = ['sudo', 'ipfw', 'add', self._rule_counter, 'pipe', pipe_id,
                 'ip', 'from', from_address, 'to', to_address]
-    self._run_shell_command(add_part + ['src-port', '%s-%s' % port_range],
-                            'Failed to add Dummynet src-port rule.')
-    self._run_shell_command(add_part + ['dst-port', '%s-%s' % port_range],
-                            'Failed to add Dummynet dst-port rule.')
+    _run_shell_command(add_part + ['src-port', '%s-%s' % port_range],
+                       'Failed to add Dummynet src-port rule.')
+    _run_shell_command(add_part + ['dst-port', '%s-%s' % port_range],
+                       'Failed to add Dummynet dst-port rule.')
     return self._rule_counter
 
   def _create_dummynet_pipe(self, bandwidth_kbps, delay_ms, packet_loss_percent,
@@ -149,30 +151,31 @@ class NetworkEmulator(object):
     if sys.platform.startswith('linux'):
       error_message += ('Make sure you have loaded the ipfw_mod.ko module to '
                         'your kernel (sudo insmod /path/to/ipfw_mod.ko)')
-    self._run_shell_command(cmd, error_message)
+    _run_shell_command(cmd, error_message)
     return self._pipe_counter
 
-  def _run_shell_command(self, command, msg=None):
-    """Executes a command.
-
-    Args:
-      command: Command list to execute.
-      msg: Message describing the error in case the command fails.
-
-    Returns:
-      The standard output from running the command.
-
-    Raises:
-      NetworkEmulatorError: If command fails. Message is set by the msg
-        parameter.
-    """
-    cmd_list = [str(x) for x in command]
-    cmd = ' '.join(cmd_list)
-    logging.debug('Running command: %s', cmd)
-
-    process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
-                               stderr=subprocess.PIPE)
-    output, error = process.communicate()
-    if process.returncode != 0:
-      raise NetworkEmulatorError(msg, cmd, process.returncode, output, error)
-    return output.strip()
+
+def _run_shell_command(command, msg=None):
+  """Executes a command.
+
+  Args:
+    command: Command list to execute.
+    msg: Message describing the error in case the command fails.
+
+  Returns:
+    The standard output from running the command.
+
+  Raises:
+    NetworkEmulatorError: If command fails. Message is set by the msg
+      parameter.
+  """
+  cmd_list = [str(x) for x in command]
+  cmd = ' '.join(cmd_list)
+  logging.debug('Running command: %s', cmd)
+
+  process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+  output, error = process.communicate()
+  if process.returncode != 0:
+    raise NetworkEmulatorError(msg, cmd, process.returncode, output, error)
+  return output.strip()
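The bulk of this file's change moves `_run_shell_command` out of the class and turns `check_permissions`/`cleanup` into @staticmethods, answering pylint R0201 (method could be a function): none of them touch self. A standalone sketch of the same refactoring, with invented names:

def _flush_rules():
  # Module-level helper: it needs no instance state at all.
  print 'flushing'

class Emulator(object):
  def __init__(self):
    self._rule_counter = 0

  @staticmethod
  def cleanup():            # R0201 fix: no self, callable as Emulator.cleanup()
    _flush_rules()

  def add_rule(self):       # stays a method: it mutates instance state
    self._rule_counter += 100
    return self._rule_counter

emulator = Emulator()
print emulator.add_rule()   # 100
emulator.cleanup()          # prints 'flushing'
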
@@ -1,6 +1,7 @@
 # Sample output from the video_quality_measurment program, included only for
 # reference. Geneate your own by running with the --python flag and then change
 # the filenames in main.py
 
 test_configuration = [{'name': 'name', 'value': 'VP8 hardware test'},
                       {'name': 'description', 'value': ''},
                       {'name': 'test_number', 'value': '0'},

@@ -1,6 +1,7 @@
 # Sample output from the video_quality_measurment program, included only for
 # reference. Geneate your own by running with the --python flag and then change
 # the filenames in main.py
 
 test_configuration = [{'name': 'name', 'value': 'VP8 software test'},
                       {'name': 'description', 'value': ''},
                       {'name': 'test_number', 'value': '0'},
@@ -17,17 +17,17 @@ class DataHelper(object):
 
   def __init__(self, data_list, table_description, names_list, messages):
     """ Initializes the DataHelper with data.
 
     Args:
       data_list: List of one or more data lists in the format that the
         Google Visualization Python API expects (list of dictionaries, one
         per row of data). See the gviz_api.DataTable documentation for more
         info.
       table_description: dictionary describing the data types of all
         columns in the data lists, as defined in the gviz_api.DataTable
         documentation.
       names_list: List of strings of what we're going to name the data
         columns after. Usually different runs of data collection.
       messages: List of strings we might append error messages to.
     """
     self.data_list = data_list
@@ -36,29 +36,29 @@ class DataHelper(object):
     self.messages = messages
     self.number_of_datasets = len(data_list)
     self.number_of_frames = len(data_list[0])
 
   def CreateData(self, field_name, start_frame=0, end_frame=0):
     """ Creates a data structure for a specified data field.
 
     Creates a data structure (data type description dictionary and a list
     of data dictionaries) to be used with the Google Visualization Python
     API. The frame_number column is always present and one column per data
     set is added and its field name is suffixed by _N where N is the number
     of the data set (0, 1, 2...)
 
     Args:
       field_name: String name of the field, must be present in the data
         structure this DataHelper was created with.
       start_frame: Frame number to start at (zero indexed). Default: 0.
       end_frame: Frame number to be the last frame. If zero all frames
         will be included. Default: 0.
 
     Returns:
       A tuple containing:
       - a dictionary describing the columns in the data result_data_table below.
         This description uses the name for each data set specified by
         names_list.
 
         Example with two data sets named 'Foreman' and 'Crew':
         {
           'frame_number': ('number', 'Frame number'),
@@ -66,36 +66,36 @@ class DataHelper(object):
           'ssim_1': ('number', 'Crew'),
         }
       - a list containing dictionaries (one per row) with the frame_number
         column and one column of the specified field_name column per data
         set.
 
         Example with two data sets named 'Foreman' and 'Crew':
         [
           {'frame_number': 0, 'ssim_0': 0.98, 'ssim_1': 0.77 },
           {'frame_number': 1, 'ssim_0': 0.81, 'ssim_1': 0.53 },
         ]
     """
 
     # Build dictionary that describes the data types
     result_table_description = {'frame_number': ('string', 'Frame number')}
     for dataset_index in range(self.number_of_datasets):
       column_name = '%s_%s' % (field_name, dataset_index)
       column_type = self.table_description[field_name][0]
       column_description = self.names_list[dataset_index]
       result_table_description[column_name] = (column_type, column_description)
 
     # Build data table of all the data
     result_data_table = []
     # We're going to have one dictionary per row.
     # Create that and copy frame_number values from the first data set
     for source_row in self.data_list[0]:
       row_dict = { 'frame_number': source_row['frame_number'] }
       result_data_table.append(row_dict)
 
     # Pick target field data points from the all data tables
     if end_frame == 0:  # Default to all frames
       end_frame = self.number_of_frames
 
     for dataset_index in range(self.number_of_datasets):
       for row_number in range(start_frame, end_frame):
         column_name = '%s_%s' % (field_name, dataset_index)
@@ -105,14 +105,14 @@ class DataHelper(object):
             self.data_list[dataset_index][row_number][field_name]
         except IndexError:
           self.messages.append("Couldn't find frame data for row %d "
                                "for %s" % (row_number, self.names_list[dataset_index]))
           break
     return result_table_description, result_data_table
 
-  def GetOrdering(self, table_description):
+  def GetOrdering(self, table_description):  # pylint: disable=R0201
     """ Creates a list of column names, ordered alphabetically except for the
     frame_number column which always will be the first column.
 
     Args:
       table_description: A dictionary of column definitions as defined by the
         gviz_api.DataTable documentation.
@@ -121,9 +121,9 @@ class DataHelper(object):
       remaining columns are sorted alphabetically.
     """
     # The JSON data representation generated from gviz_api.DataTable.ToJSon()
     # must have frame_number as its first column in order for the chart to
     # use it as it's X-axis value series.
     # gviz_api.DataTable orders the columns by name by default, which will
     # be incorrect if we have column names that are sorted before frame_number
     # in our data table.
     columns_ordering = ['frame_number']
@@ -132,8 +132,8 @@ class DataHelper(object):
       if column != 'frame_number':
         columns_ordering.append(column)
     return columns_ordering
 
-  def CreateConfigurationTable(self, configurations):
+  def CreateConfigurationTable(self, configurations):  # pylint: disable=R0201
     """ Combines multiple test data configurations for display.
 
     Args:
@@ -175,9 +175,9 @@ class DataHelper(object):
     for configuration in configurations:
       data = {}
       result_data.append(data)
-      for dict in configuration:
-        name = dict['name']
-        value = dict['value']
+      for values in configuration:
+        name = values['name']
+        value = values['value']
         result_description[name] = 'string'
         data[name] = value
     return result_description, result_data
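For context on where these structures go: the description/data pair that CreateData returns is exactly what the Google Visualization Python API consumes, as tools/python_charts/main.py does further down. A minimal usage sketch (frame values invented; gviz_api is the library the docstring links to):

import gviz_api  # http://code.google.com/p/google-visualization-python/

description = {'frame_number': ('string', 'Frame number'),
               'ssim_0': ('number', 'Foreman')}
data = [{'frame_number': '0', 'ssim_0': 0.98},
        {'frame_number': '1', 'ssim_0': 0.81}]

table = gviz_api.DataTable(description, data)
# frame_number must come first so the chart uses it as the X axis.
print table.ToJSon(columns_order=('frame_number', 'ssim_0'))

Note also the `for dict in configuration` fix above: like `str` and `len` elsewhere in this commit, `dict` is a builtin, and rebinding it inside the loop triggers pylint W0622.
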
@@ -7,8 +7,6 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
-
 import unittest
 import webrtc.data_helper
 
@@ -16,13 +14,13 @@ class Test(unittest.TestCase):
 
   def setUp(self):
     # Simulate frame data from two different test runs, with 2 frames each.
     self.frame_data_0 = [{'frame_number': 0, 'ssim': 0.5, 'psnr': 30.5},
                          {'frame_number': 1, 'ssim': 0.55, 'psnr': 30.55}]
     self.frame_data_1 = [{'frame_number': 0, 'ssim': 0.6, 'psnr': 30.6},
                          {'frame_number': 0, 'ssim': 0.66, 'psnr': 30.66}]
     self.all_data = [ self.frame_data_0, self.frame_data_1 ]
 
     # Test with frame_number column in a non-first position sice we need to
     # support reordering that to be able to use the gviz_api as we want.
     self.type_description = {
       'ssim': ('number', 'SSIM'),
@@ -66,13 +64,13 @@ class Test(unittest.TestCase):
     self.assertEquals(1, row['frame_number'])
     self.assertEquals(0.55, row['ssim_0'])
     self.assertEquals(0.66, row['ssim_1'])
 
     description, data_table = helper.CreateData('psnr')
     self.assertEqual(3, len(description))
     self.assertTrue('frame_number' in description)
     self.assertTrue('psnr_0' in description)
     self.assertTrue('psnr_1' in description)
     self.assertEqual(0, len(messages))
 
     self.assertEquals(2, len(data_table))
     row = data_table[0]
@@ -83,21 +81,21 @@ class Test(unittest.TestCase):
     self.assertEquals(1, row['frame_number'])
     self.assertEquals(30.55, row['psnr_0'])
     self.assertEquals(30.66, row['psnr_1'])
 
   def testGetOrdering(self):
     """ Tests that the ordering help method returns a list with frame_number
     first and the rest sorted alphabetically """
     messages = []
     helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
                                            self.names, messages)
-    description, data_table = helper.CreateData('ssim')
+    description, _ = helper.CreateData('ssim')
     columns = helper.GetOrdering(description)
     self.assertEqual(3, len(columns))
     self.assertEqual(0, len(messages))
     self.assertEqual('frame_number', columns[0])
     self.assertEqual('ssim_0', columns[1])
     self.assertEqual('ssim_1', columns[2])
 
   def testCreateConfigurationTable(self):
     messages = []
     helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
@@ -110,6 +108,6 @@ class Test(unittest.TestCase):
     self.assertTrue(description.has_key('input_filename'))
     self.assertEquals('Test 0', data[0]['name'])
     self.assertEquals('Test 1', data[1]['name'])
 
 if __name__ == "__main__":
   unittest.main()
@@ -7,30 +7,28 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
-
 import os
 import gviz_api
 import webrtc.data_helper
 
 def main():
   """
   This Python script displays a web page with test created with the
   video_quality_measurement program, which is a tool in WebRTC.
 
   The script requires on two external files and one Python library:
   - A HTML template file with layout and references to the json variables
     defined in this script
   - A data file in Python format, containing the following:
     - test_configuration - a dictionary of test configuration names and values.
     - frame_data_types - a dictionary that maps the different metrics to their
       data types.
     - frame_data - a list of dictionaries where each dictionary maps a metric to
       it's value.
   - The gviz_api.py of the Google Visualization Python API, available at
     http://code.google.com/p/google-visualization-python/
 
   The HTML file is shipped with the script, while the data file must be
   generated by running video_quality_measurement with the --python flag
   specified.
   """
@@ -48,78 +46,78 @@ def main():
     page_template = f.read()
     f.close()
   except IOError as e:
     ShowErrorPage('Cannot open page template file: %s<br>Details: %s' %
                   (page_template_filename, e))
     return
 
   # Read data from external Python script files. First check that they exist.
   for filename in data_filenames:
     if not os.path.exists(filename):
       messages.append('Cannot open data file: %s' % filename)
       data_filenames.remove(filename)
 
   # Read data from all existing input files.
   data_list = []
   test_configurations = []
   names = []
 
   for filename in data_filenames:
     read_vars = {}  # empty dictionary to load the data into.
     execfile(filename, read_vars, read_vars)
 
     test_configuration = read_vars['test_configuration']
     table_description = read_vars['frame_data_types']
     table_data = read_vars['frame_data']
 
     # Verify the data in the file loaded properly.
     if not table_description or not table_data:
       messages.append('Invalid input file: %s. Missing description list or '
                       'data dictionary variables.' % filename)
       continue
 
     # Frame numbers appear as number type in the data, but Chart API requires
     # values of the X-axis to be of string type.
     # Change the frame_number column data type:
     table_description['frame_number'] = ('string', 'Frame number')
     # Convert all the values to string types:
     for row in table_data:
       row['frame_number'] = str(row['frame_number'])
 
     # Store the unique data from this file in the high level lists.
     test_configurations.append(test_configuration)
     data_list.append(table_data)
     # Name of the test run must be present.
     test_name = FindConfiguration(test_configuration, 'name')
     if not test_name:
       messages.append('Invalid input file: %s. Missing configuration key '
                       '"name"', filename)
       continue
     names.append(test_name)
 
   # Create data helper and build data tables for each graph.
   helper = webrtc.data_helper.DataHelper(data_list, table_description,
                                          names, messages)
 
   # Loading it into gviz_api.DataTable objects and create JSON strings.
   description, data = helper.CreateConfigurationTable(test_configurations)
   configurations = gviz_api.DataTable(description, data)
   json_configurations = configurations.ToJSon()
 
   description, data = helper.CreateData('ssim')
   ssim = gviz_api.DataTable(description, data)
   json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))
 
   description, data = helper.CreateData('psnr')
   psnr = gviz_api.DataTable(description, data)
   json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))
 
   description, data = helper.CreateData('packets_dropped')
   packet_loss = gviz_api.DataTable(description, data)
   json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
 
   description, data = helper.CreateData('bit_rate')
   # Add a column of data points for the desired bit rate to be plotted.
   # (uses test configuration from the last data set, assuming it is the same
   # for all of them)
   desired_bit_rate = FindConfiguration(test_configuration, 'bit_rate_in_kbps')
   if not desired_bit_rate:
@@ -135,12 +133,12 @@ def main():
 
   # Format the messages list with newlines.
   messages = '\n'.join(messages)
 
   # Put the variables as JSon strings into the template.
   print page_template % vars()
 
 def FindConfiguration(configuration, name):
   """ Finds a configuration value using it's name.
   Returns the first configuration with a matching name. Returns None if no
   matching configuration is found. """
   return_value = None
@@ -152,6 +150,6 @@ def FindConfiguration(configuration, name):
 
 def ShowErrorPage(error_message):
   print '<html><body>%s</body></html>' % error_message
 
 if __name__ == '__main__':
   main()
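The `execfile(filename, read_vars, read_vars)` call above is how this Python 2 script loads a data file that is itself Python source: the file's assignments land in the read_vars dictionary. A tiny sketch of the pattern, assuming a hypothetical sample_data.py:

# Suppose sample_data.py (hypothetical) contains one assignment:
#   frame_data = [{'frame_number': 0, 'psnr': 30.5}]
read_vars = {}
execfile('sample_data.py', read_vars, read_vars)  # Python 2 only
print read_vars['frame_data'][0]['psnr']  # 30.5
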
@@ -10,8 +10,6 @@
 
 """Contains tweakable constants for quality dashboard utility scripts."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 # This identifies our application using the information we got when we
 # registered the application on Google appengine.
 DASHBOARD_SERVER = 'webrtc-dashboard.appspot.com'
@@ -10,8 +10,6 @@
 
 """Implements a handler for adding build status data."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import datetime
 import logging
 
@@ -126,7 +124,6 @@ class AddBuildStatusData(oauth_post_request_handler.OAuthPostRequestHandler):
      undesirable since we don't want multiple statuses for one bot-revision
      combination. Now we will effectively update the bot's status instead.
   """
-
   def _parse_and_store_data(self):
     build_status_root = _ensure_build_status_root_exists()
     build_status_data = _filter_oauth_parameters(self.request.arguments())
@@ -10,8 +10,6 @@
 
 """Implements a handler for adding coverage data."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 from datetime import datetime
 import logging
 
@@ -24,7 +22,6 @@ REPORT_CATEGORIES = ('small_medium_tests', 'large_tests')
 
 class CoverageData(db.Model):
   """This represents one coverage report from the build bot."""
-
   # The date the report was made.
   date = db.DateTimeProperty(required=True)
 
@@ -52,7 +49,6 @@ class AddCoverageData(oauth_post_request_handler.OAuthPostRequestHandler):
        function_coverage: Function coverage percentage.
        branch_coverage: Branch coverage percentage.
     """
-
   def _parse_and_store_data(self):
     try:
       request_posix_timestamp = float(self.request.get('oauth_timestamp'))
@@ -77,7 +73,8 @@ class AddCoverageData(oauth_post_request_handler.OAuthPostRequestHandler):
 
   def _parse_percentage(self, key):
     """Parses out a percentage value from the request."""
-    percentage = float(self.request.get(key))
+    string_value = self.request.get(key)
+    percentage = float(string_value)
     if percentage < 0.0 or percentage > 100.0:
       raise ValueError('%s is not a valid percentage.' % string_value)
     return percentage
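The `_parse_percentage` hunk fixes a real bug that linting surfaced: the error path referenced `string_value`, which the old code never defined, so an out-of-range value would have died with a NameError instead of the intended ValueError (pylint reports this as E0602, undefined variable). A standalone sketch of the repaired shape:

def parse_percentage(raw):
  string_value = raw             # keep the raw text for the error message
  percentage = float(string_value)
  if percentage < 0.0 or percentage > 100.0:
    raise ValueError('%s is not a valid percentage.' % string_value)
  return percentage

print parse_percentage('42.5')  # 42.5
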
@@ -10,8 +10,6 @@
 
 """Implements the quality tracker dashboard and reporting facilities."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import math
 
 from google.appengine.ext.webapp import template
@@ -27,11 +25,11 @@ class ShowDashboard(webapp2.RequestHandler):
      The page is shown by grabbing data we have stored previously
      in the App Engine database using the AddCoverageData handler.
   """
   def get(self):
     build_status_loader = load_build_status.BuildStatusLoader()
 
     # Split the build status data in two rows to fit them on the page.
+    # pylint: disable=W0612
     build_status_data = build_status_loader.load_build_status_data()
     split_point = int(math.ceil(len(build_status_data) / 2.0))
     build_status_data_row_1 = build_status_data[:split_point]
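The disable above is presumably needed because the row variables are consumed by the page template rather than referenced again in the function, which is what W0612 (unused variable) keys on. The split itself is a plain ceil-half partition; a standalone sketch:

import math

def split_in_two(items):
  # The first row gets the extra element when the count is odd.
  split_point = int(math.ceil(len(items) / 2.0))
  return items[:split_point], items[split_point:]

print split_in_two([1, 2, 3, 4, 5])  # ([1, 2, 3], [4, 5])
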
@@ -10,8 +10,6 @@
 
 """Implements the LKGR page."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import webapp2
 
 import load_build_status
@@ -10,8 +10,6 @@
 
 """Loads build status data for the dashboard."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 from google.appengine.ext import db
 
 
@@ -32,8 +30,11 @@ def _get_first_entry(iterable):
 
 class BuildStatusLoader:
   """ Loads various build status data from the database."""
+  def __init__(self):
+    pass
 
-  def load_build_status_data(self):
+  @staticmethod
+  def load_build_status_data():
     """Returns the latest conclusive build status for each bot.
 
     The statuses OK, failed and warnings are considered to be conclusive.
@@ -76,7 +77,8 @@ class BuildStatusLoader:
 
     return bots_to_latest_conclusive_entry.values()
 
-  def load_last_modified_at(self):
+  @staticmethod
+  def load_last_modified_at():
     build_status_root = db.GqlQuery('SELECT * '
                                     'FROM BuildStatusRoot').get()
     if not build_status_root:
@@ -85,7 +87,8 @@ class BuildStatusLoader:
 
     return build_status_root.last_updated_at
 
-  def compute_lkgr(self):
+  @staticmethod
+  def compute_lkgr():
     """ Finds the most recent revision for which all bots are green.
 
     Returns:
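`BuildStatusLoader` has no base class, so pylint's W0232 (class has no __init__ method) fires on it; the commit adds the no-op __init__ stub here and, for the rest of the tree, simply lists W0232 among the disabled warnings in PRESUBMIT.py. A sketch of both remedies, with invented class names:

class LoaderWithInit:            # old-style class; the stub quiets W0232
  def __init__(self):
    pass

  @staticmethod
  def load():
    return []

class LoaderNewStyle(object):    # the other fix: derive from object
  @staticmethod
  def load():
    return []

print LoaderWithInit.load(), LoaderNewStyle.load()
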
@@ -10,18 +10,14 @@
 
 """Loads coverage data from the database."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import logging
 
 from google.appengine.ext import db
 import gviz_api
 
 
 class CoverageDataLoader:
   """ Loads coverage data from the database."""
-  def load_coverage_json_data(self, report_category):
+  @staticmethod
+  def load_coverage_json_data(report_category):
     coverage_entries = db.GqlQuery('SELECT * '
                                    'FROM CoverageData '
                                    'WHERE report_category = :1 '
@@ -10,8 +10,6 @@
 
 """Connects all URLs with their respective handlers."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 from google.appengine.ext.webapp import template
 import webapp2
 
@@ -10,8 +10,6 @@
 
 """Provides a OAuth request handler base class."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 from google.appengine.api import oauth
 import logging
 import webapp2
@@ -34,6 +32,8 @@ class OAuthPostRequestHandler(webapp2.RequestHandler):
      The handler will accept an OAuth request if it is correctly formed and
      the consumer is acting on behalf of an administrator for the dashboard.
   """
+  def __init__(self):
+    pass
 
   def post(self):
     try:
@@ -50,7 +50,8 @@ class OAuthPostRequestHandler(webapp2.RequestHandler):
     """Reads data from POST request and responds accordingly."""
     raise NotImplementedError('You must override this method!')
 
-  def _authenticate_user(self):
+  @staticmethod
+  def _authenticate_user():
     try:
       if oauth.is_current_user_admin():
         # The user on whose behalf we are acting is indeed an administrator
@@ -8,10 +8,7 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import unittest
 from google.appengine.ext import db
 from google.appengine.ext import testbed
 
 from add_build_status_data import BuildStatusData
@@ -10,11 +10,8 @@
 
 """Contains utilities for communicating with the dashboard."""
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import httplib
 import shelve
 import urlparse
 import oauth.oauth as oauth
 
 import constants
@@ -43,6 +40,8 @@ class DashboardConnection:
 
   def __init__(self, consumer_key):
     self.consumer_key_ = consumer_key
+    self.consumer_secret_ = None
+    self.access_token_string_ = None
 
   def read_required_files(self, consumer_secret_file, access_token_file):
     """Reads required data for making OAuth requests.
@@ -119,16 +118,17 @@ class DashboardConnection:
   def _read_consumer_secret(self, filename):
     return self._read_shelve(filename, 'consumer_secret')
 
-  def _read_shelve(self, filename, key):
-    input_file = shelve.open(filename)
-
-    if not input_file.has_key(key):
-      raise FailedToReadRequiredInputFile('Missing correct %s file in current '
-                                          'directory. You may have to run '
-                                          'request_oauth_permission.py.' %
-                                          filename)
-
-    result = input_file[key]
-    input_file.close()
-
-    return result
+  @staticmethod
+  def _read_shelve(filename, key):
+    input_file = shelve.open(filename)
+
+    if not input_file.has_key(key):
+      raise FailedToReadRequiredInputFile('Missing correct %s file in current '
+                                          'directory. You may have to run '
+                                          'request_oauth_permission.py.' %
+                                          filename)
+
+    result = input_file[key]
+    input_file.close()
+
+    return result
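Seeding `consumer_secret_` and `access_token_string_` with None in __init__ addresses pylint W0201 (attribute defined outside __init__): previously their first assignment happened in read_required_files. A standalone sketch of the pattern, names invented:

class Connection(object):
  def __init__(self):
    # Declare every attribute up front (W0201 fix): the object's shape
    # is now visible from __init__ alone.
    self.secret_ = None

  def read_files(self, secret):
    self.secret_ = secret        # later assignment, not first definition

connection = Connection()
print connection.secret_  # None until read_files() runs
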
@@ -30,8 +30,6 @@
    will be 'access_token' and 'consumer_secret', respectively.
 """
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import shelve
 import sys
 import urlparse
@@ -13,8 +13,6 @@
 Compatible with build bot 0.8.4 P1.
 """
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import re
 import urllib
 
@@ -13,8 +13,6 @@
 Compatible with build bot 0.8.4 P1.
 """
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import unittest
 
 import tgrid_parser
@@ -504,7 +502,7 @@ MINIMAL_IN_TRUNK_SOURCESTAMP = """
 class TGridParserTest(unittest.TestCase):
   def test_parser_throws_exception_on_empty_html(self):
     self.assertRaises(tgrid_parser.FailedToParseBuildStatus,
-                      tgrid_parser.parse_tgrid_page, '');
+                      tgrid_parser.parse_tgrid_page, '')
 
   def test_parser_finds_successful_bot(self):
     result = tgrid_parser.parse_tgrid_page(MINIMAL_OK)
@@ -564,17 +562,6 @@ class TGridParserTest(unittest.TestCase):
     self.assertEqual('1576--LinuxValgrind', first_mapping[0])
     self.assertEqual('324--failed', first_mapping[1])
 
-  def test_parser_finds_exception_slave_lost_and_maps_to_failed(self):
-    # Sometimes the transposed grid says "in trunk" in the source stamp, so
-    # make sure we deal with that.
-    result = tgrid_parser.parse_tgrid_page(MINIMAL_IN_TRUNK_SOURCESTAMP)
-
-    self.assertEqual(1, len(result), 'There is only one bot in the sample.')
-    first_mapping = result.items()[0]
-
-    self.assertEqual('1576--LinuxValgrind', first_mapping[0])
-    self.assertEqual('324--failed', first_mapping[1])
-
   def test_parser_finds_all_bots_and_revisions_except_forced_builds(self):
     result = tgrid_parser.parse_tgrid_page(SAMPLE_FILE)
 
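The test deleted above shared its method name with another test in the class, which is what pylint flags as E0102 (method already defined); in Python the second definition silently replaces the first, so one of the two bodies never ran. A standalone sketch of the hazard:

import unittest

class Demo(unittest.TestCase):
  def test_something(self):
    self.assertEqual(1, 1)

  def test_something(self):      # E0102: silently replaces the body above
    self.assertEqual(2, 2)

# Only one test survives; unittest will never run the first body.
print len([name for name in dir(Demo) if name.startswith('test')])  # 1
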
@@ -29,6 +29,7 @@ class TrackBuildStatusTest(unittest.TestCase):
   def test_get_desired_bots(self):
     bot_to_status_mapping = copy.deepcopy(NORMAL_BOT_TO_STATUS_MAPPING)
     desired_bot_names = ['Linux32 Debug']
+    # pylint: disable=W0212
     result = track_build_status._filter_undesired_bots(bot_to_status_mapping,
                                                        desired_bot_names)
     self.assertEquals(1, len(result))
@@ -24,12 +24,9 @@
    /home/<build bot user>/www.
 """
 
-__author__ = 'phoglund@webrtc.org (Patrik Höglund)'
-
 import os
 import re
 import sys
 import time
 
 import constants
 import dashboard_connection
@@ -154,7 +151,7 @@ def _parse_args():
 
 
 if __name__ == '__main__':
-  report_category, directory_prefix = _parse_args()
-  if report_category:
-    _main(report_category, directory_prefix)
+  category, dir_prefix = _parse_args()
+  if category:
+    _main(category, dir_prefix)
@@ -74,7 +74,7 @@ def main():
                     help='Lists all currently supported tests.')
   parser.add_option('-t', '--test', action='append', default=[],
                     help='Which test to run. May be specified multiple times.')
-  options, unused_args = parser.parse_args()
+  options, _ = parser.parse_args()
 
   if sys.platform.startswith('win'):
     test_dict = _WIN_TESTS