diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 03933b83a..dd78b129a 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -6,8 +6,6 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
-import os.path
-
def _LicenseHeader(input_api):
"""Returns the license header regexp."""
# Accept any year number from 2011 to the current year
@@ -67,10 +65,6 @@ def _CheckNoFRIEND_TEST(input_api, output_api):
'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and '
'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
-def _IsLintWhitelisted(file_name):
- """ Checks if a file is whitelisted for lint check."""
- return True
-
def _CheckApprovedFilesLintClean(input_api, output_api,
source_file_filter=None):
"""Checks that all new or whitelisted .cc and .h files pass cpplint.py.
@@ -100,7 +94,7 @@ def _CheckApprovedFilesLintClean(input_api, output_api,
files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
# Note that moved/renamed files also count as added for svn.
- if (f.Action() == 'A' or _IsLintWhitelisted(f.LocalPath())):
+ if (f.Action() == 'A'):
files.append(f.AbsoluteLocalPath())
for file_name in files:
@@ -121,6 +115,27 @@ def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
# TODO(kjellander): Use presubmit_canned_checks.PanProjectChecks too.
results = []
+ results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
+ black_list=(r'^.*gviz_api\.py$',
+ r'^.*gaeunit\.py$',
+ r'^third_party/.*\.py$',
+ r'^testing/.*\.py$',
+ r'^tools/gyp/.*\.py$',
+ r'^tools/perf_expectations/.*\.py$',
+ r'^tools/python/.*\.py$',
+ r'^tools/python_charts/data/.*\.py$',
+ r'^tools/refactoring.*\.py$',
+ # TODO(phoglund): should arguably be checked.
+ r'^tools/valgrind-webrtc/.*\.py$',
+ r'^tools/valgrind/.*\.py$',
+ # TODO(phoglund): should arguably be checked.
+ r'^webrtc/build/.*\.py$',
+ r'^build/.*\.py$',
+ r'^out/.*\.py$',),
+ disabled_warnings=['F0401', # Failed to import x
+ 'E0611', # No package y in x
+ 'W0232', # Class has no __init__ method
+ ]))
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
diff --git a/samples/js/apprtc/apprtc.py b/samples/js/apprtc/apprtc.py
index ffcf1c971..7be5054ba 100644
--- a/samples/js/apprtc/apprtc.py
+++ b/samples/js/apprtc/apprtc.py
@@ -2,15 +2,12 @@
#
# Copyright 2011 Google Inc. All Rights Reserved.
-# pylint: disable-msg=C6310
-
"""WebRTC Demo
This module demonstrates the WebRTC API by implementing a simple video chat app.
"""
import cgi
-import datetime
import logging
import os
import random
@@ -26,13 +23,13 @@ jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
# Lock for syncing DB operation in concurrent requests handling.
-# TODO(brave): keeping working on improving performance with thread syncing.
+# TODO(brave): keep working on improving performance with thread syncing.
# One possible method for near future is to reduce the message caching.
LOCK = threading.RLock()
-def generate_random(len):
+def generate_random(length):
word = ''
- for i in range(len):
+ for _ in range(length):
word += random.choice('0123456789')
return word
@@ -66,18 +63,19 @@ def make_loopback_answer(message):
def maybe_add_fake_crypto(message):
if message.find("a=crypto") == -1:
index = len(message)
- crypto_line = "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:BAADBAADBAADBAADBAADBAADBAADBAADBAADBAAD\\r\\n"
+ crypto_line = ("a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:"
+ "BAADBAADBAADBAADBAADBAADBAADBAADBAADBAAD\\r\\n")
# reverse find for multiple find and insert operations.
- index = message.rfind("c=IN", 0, index)
- while (index != -1):
+ index = message.rfind("c=IN", 0, index)
+ while (index != -1):
message = message[:index] + crypto_line + message[index:]
- index = message.rfind("c=IN", 0, index)
+ index = message.rfind("c=IN", 0, index)
return message
def handle_message(room, user, message):
message_obj = json.loads(message)
other_user = room.get_other_user(user)
- room_key = room.key().id_or_name();
+ room_key = room.key().id_or_name()
if message_obj['type'] == 'bye':
# This would remove the other_user in loopback test too.
# So check its availability before forwarding Bye message.
@@ -89,7 +87,7 @@ def handle_message(room, user, message):
# Special case the loopback scenario.
if other_user == user:
message = make_loopback_answer(message)
- # Workaround Chrome bug.
+ # Workaround Chrome bug.
# Insert a=crypto line into offer from FireFox.
# TODO(juberti): Remove this call.
message = maybe_add_fake_crypto(message)
@@ -108,14 +106,14 @@ def send_saved_messages(client_id):
messages = get_saved_messages(client_id)
for message in messages:
channel.send_message(client_id, message.msg)
- logging.info('Delivered saved message to ' + client_id);
+ logging.info('Delivered saved message to ' + client_id)
message.delete()
def on_message(room, user, message):
client_id = make_client_id(room, user)
if room.is_connected(user):
channel.send_message(client_id, message)
- logging.info('Delivered message to user ' + user);
+ logging.info('Delivered message to user ' + user)
else:
new_message = Message(client_id = client_id, msg = message)
new_message.put()
@@ -128,7 +126,7 @@ def make_media_constraints(hd_video):
# Demo with WHD by setting size with 1280x720.
constraints['mandatory']['minHeight'] = 720
constraints['mandatory']['minWidth'] = 1280
- # Disabled for now due to weird stretching behavior on Mac.
+ # Disabled for now due to weird stretching behavior on Mac.
#else:
# Demo with WVGA by setting Aspect Ration;
#constraints['mandatory']['maxAspectRatio'] = 1.778
@@ -172,13 +170,13 @@ class Room(db.Model):
user2_connected = db.BooleanProperty(default=False)
def __str__(self):
- str = '['
+ result = '['
if self.user1:
- str += "%s-%r" % (self.user1, self.user1_connected)
+ result += "%s-%r" % (self.user1, self.user1_connected)
if self.user2:
- str += ", %s-%r" % (self.user2, self.user2_connected)
- str += ']'
- return str
+ result += ", %s-%r" % (self.user2, self.user2_connected)
+ result += ']'
+ return result
def get_occupancy(self):
occupancy = 0
@@ -269,7 +267,8 @@ class DisconnectPage(webapp2.RequestHandler):
logging.info('User ' + user + ' removed from room ' + room_key)
logging.info('Room ' + room_key + ' has state ' + str(room))
if other_user and other_user != user:
- channel.send_message(make_client_id(room, other_user), '{"type":"bye"}')
+ channel.send_message(make_client_id(room, other_user),
+ '{"type":"bye"}')
logging.info('Sent BYE to ' + other_user)
logging.warning('User ' + user + ' disconnected from room ' + room_key)
@@ -288,7 +287,6 @@ class MessagePage(webapp2.RequestHandler):
class MainPage(webapp2.RequestHandler):
"""The main UI page, renders the 'index.html' template."""
-
def get(self):
"""Renders the main page. When this page is shown, we create a new
channel to push asynchronous updates to the client."""
@@ -353,7 +351,7 @@ class MainPage(webapp2.RequestHandler):
self.response.out.write(template.render({ 'room_key': room_key }))
logging.info('Room ' + room_key + ' is full')
return
-
+
room_link = base_url + '/?r=' + room_key
room_link = append_url_arguments(self.request, room_link)
token = create_channel(room, user, token_timeout)
diff --git a/samples/js/demos/js/demosite/main.py b/samples/js/demos/js/demosite/main.py
index a91fda932..1ee3710f9 100644
--- a/samples/js/demos/js/demosite/main.py
+++ b/samples/js/demos/js/demosite/main.py
@@ -14,24 +14,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
import webapp2
-import os
-from google.appengine.ext.webapp import template
class PageHandler(webapp2.RequestHandler):
- def get(self):
- base_url = self.request.path
- if self.request.path == '/':
- self.redirect("http://webrtc.googlecode.com/svn/trunk/"
- + "samples/js/demos/index.html"
- , permanent=True)
- else:
- self.redirect("http://webrtc.googlecode.com/svn/trunk/"
- + "samples/js/demos"
- + base_url,
- permanent=True)
+ def get(self):
+ base_url = self.request.path
+ if self.request.path == '/':
+ self.redirect("http://webrtc.googlecode.com/svn/trunk/"
+ + "samples/js/demos/index.html"
+ , permanent=True)
+ else:
+ self.redirect("http://webrtc.googlecode.com/svn/trunk/"
+ + "samples/js/demos"
+ + base_url,
+ permanent=True)
app = webapp2.WSGIApplication([
(r'/*.*', PageHandler),
- ], debug=True)
+], debug=True)
diff --git a/tools/barcode_tools/barcode_decoder.py b/tools/barcode_tools/barcode_decoder.py
index 362a794f6..4fddfba1e 100755
--- a/tools/barcode_tools/barcode_decoder.py
+++ b/tools/barcode_tools/barcode_decoder.py
@@ -77,7 +77,7 @@ def decode_frames(barcode_width, barcode_height, input_directory='.',
(bool): True if the decoding went without errors.
"""
jars = helper_functions.form_jars_string(path_to_zxing)
- command_line_decoder ='com.google.zxing.client.j2se.CommandLineRunner'
+ command_line_decoder = 'com.google.zxing.client.j2se.CommandLineRunner'
return helper_functions.perform_action_on_all_files(
directory=input_directory, file_pattern='frame_',
file_extension='png', start_number=1, action=_decode_barcode_in_file,
diff --git a/tools/barcode_tools/barcode_encoder.py b/tools/barcode_tools/barcode_encoder.py
index e2c162576..0e1cd3bad 100755
--- a/tools/barcode_tools/barcode_encoder.py
+++ b/tools/barcode_tools/barcode_encoder.py
@@ -9,7 +9,6 @@
import optparse
import os
-import subprocess
import sys
import helper_functions
@@ -41,7 +40,7 @@ def generate_upca_barcodes(number_of_barcodes, barcode_width, barcode_height,
"""
base_file_name = os.path.join(output_directory, "barcode_")
jars = helper_functions.form_jars_string(path_to_zxing)
- command_line_encoder ='com.google.zxing.client.j2se.CommandLineEncoder'
+ command_line_encoder = 'com.google.zxing.client.j2se.CommandLineEncoder'
barcode_width = str(barcode_width)
barcode_height = str(barcode_height)
@@ -109,7 +108,7 @@ def _convert_to_yuv_and_delete(output_directory, file_name, pattern):
try:
helper_functions.run_shell_command(
command, msg=('Error during PNG to YUV conversion of %s' %
- file_name));
+ file_name))
os.remove(file_name)
except helper_functions.HelperError, err:
print >> sys.stderr, err
@@ -154,7 +153,7 @@ def _add_to_file_and_delete(output_file, file_name):
input_file.close()
try:
os.remove(file_name)
- except Exception as e:
+ except Exception:
sys.stderr.write('Error in deleting file %s' % file_name)
return False
return True
diff --git a/tools/barcode_tools/build_zxing.py b/tools/barcode_tools/build_zxing.py
index 5656a46a5..71b5e603a 100755
--- a/tools/barcode_tools/build_zxing.py
+++ b/tools/barcode_tools/build_zxing.py
@@ -27,7 +27,7 @@ def run_ant_build_command(path_to_ant_build_file):
if process.returncode != 0:
print >> sys.stderr, 'Failed to execute: %s' % ' '.join(cmd)
return process.returncode
- except Exception as e:
+ except Exception:
print >> sys.stderr, 'Failed to execute: %s' % ' '.join(cmd)
return -1
diff --git a/tools/coverity/coverity.py b/tools/coverity/coverity.py
index a58889d00..fafd7ac22 100755
--- a/tools/coverity/coverity.py
+++ b/tools/coverity/coverity.py
@@ -117,7 +117,7 @@ def _ReleaseLock(lock_file, lock_filename):
os.remove(lock_filename)
-def run_coverity(options, args):
+def run_coverity(options):
"""Runs all the selected tests for the given build type and target."""
# Create the lock file to prevent another instance of this script from
# running.
@@ -155,9 +155,9 @@ def run_coverity(options, args):
# Do a clean build. Remove the build output directory first.
if sys.platform.startswith('linux'):
- rm_path = os.path.join(options.source_dir,'out',options.target)
+ rm_path = os.path.join(options.source_dir, 'out', options.target)
elif sys.platform == 'win32':
- rm_path = os.path.join(options.source_dir,options.solution_dir,
+ rm_path = os.path.join(options.source_dir, options.solution_dir,
options.target)
elif sys.platform == 'darwin':
rm_path = os.path.join(options.source_dir,'xcodebuild')
@@ -169,16 +169,16 @@ def run_coverity(options, args):
if options.dry_run:
print 'shutil.rmtree(%s)' % repr(rm_path)
else:
- shutil.rmtree(rm_path,True)
+ shutil.rmtree(rm_path, True)
if options.preserve_intermediate_dir:
- print 'Preserving intermediate directory.'
+ print 'Preserving intermediate directory.'
else:
if options.dry_run:
print 'shutil.rmtree(%s)' % repr(options.coverity_intermediate_dir)
print 'os.mkdir(%s)' % repr(options.coverity_intermediate_dir)
else:
- shutil.rmtree(options.coverity_intermediate_dir,True)
+ shutil.rmtree(options.coverity_intermediate_dir, True)
os.mkdir(options.coverity_intermediate_dir)
print 'Elapsed time: %ds' % (time.time() - start_time)
@@ -316,8 +316,8 @@ def main():
action='store_true', help=helpmsg,
default=False)
- options, args = option_parser.parse_args()
- return run_coverity(options, args)
+ options, _ = option_parser.parse_args()
+ return run_coverity(options)
if '__main__' == __name__:
diff --git a/tools/e2e_quality/audio/run_audio_test.py b/tools/e2e_quality/audio/run_audio_test.py
index ae1b09c52..51caa42cd 100755
--- a/tools/e2e_quality/audio/run_audio_test.py
+++ b/tools/e2e_quality/audio/run_audio_test.py
@@ -23,7 +23,6 @@ import re
import shlex
import subprocess
import sys
-import threading
import time
import perf.perf_utils
@@ -49,7 +48,7 @@ def main(argv):
help='command-line arguments for comparison tool')
parser.add_option('--regexp',
help='regular expression to extract the comparison metric')
- (options, args) = parser.parse_args(argv[1:])
+ options, _ = parser.parse_args(argv[1:])
# Get the initial default capture device, to restore later.
command = ['pacmd', 'list-sources']
diff --git a/tools/network_emulator/network_emulator.py b/tools/network_emulator/network_emulator.py
index 2876939ea..9ae1bdb5b 100644
--- a/tools/network_emulator/network_emulator.py
+++ b/tools/network_emulator/network_emulator.py
@@ -80,7 +80,8 @@ class NetworkEmulator(object):
'any', self._port_range)
logging.debug('Created outgoing rule: %s', outgoing_rule_id)
- def check_permissions(self):
+ @staticmethod
+ def check_permissions():
"""Checks if permissions are available to run Dummynet commands.
Raises:
@@ -88,19 +89,20 @@ class NetworkEmulator(object):
available.
"""
if os.geteuid() != 0:
- self._run_shell_command(
+ _run_shell_command(
['sudo', '-n', 'ipfw', '-h'],
msg=('Cannot run \'ipfw\' command. This script must be run as '
'root or have password-less sudo access to this command.'))
- def cleanup(self):
+ @staticmethod
+ def cleanup():
"""Stops the network emulation by flushing all Dummynet rules.
Notice that this will flush any rules that may have been created previously
before starting the emulation.
"""
- self._run_shell_command(['sudo', 'ipfw', '-f', 'flush'],
- 'Failed to flush Dummynet rules!')
+ _run_shell_command(['sudo', 'ipfw', '-f', 'flush'],
+ 'Failed to flush Dummynet rules!')
def _create_dummynet_rule(self, pipe_id, from_address, to_address,
port_range):
@@ -121,10 +123,10 @@ class NetworkEmulator(object):
self._rule_counter += 100
add_part = ['sudo', 'ipfw', 'add', self._rule_counter, 'pipe', pipe_id,
'ip', 'from', from_address, 'to', to_address]
- self._run_shell_command(add_part + ['src-port', '%s-%s' % port_range],
- 'Failed to add Dummynet src-port rule.')
- self._run_shell_command(add_part + ['dst-port', '%s-%s' % port_range],
- 'Failed to add Dummynet dst-port rule.')
+ _run_shell_command(add_part + ['src-port', '%s-%s' % port_range],
+ 'Failed to add Dummynet src-port rule.')
+ _run_shell_command(add_part + ['dst-port', '%s-%s' % port_range],
+ 'Failed to add Dummynet dst-port rule.')
return self._rule_counter
def _create_dummynet_pipe(self, bandwidth_kbps, delay_ms, packet_loss_percent,
@@ -149,30 +151,31 @@ class NetworkEmulator(object):
if sys.platform.startswith('linux'):
error_message += ('Make sure you have loaded the ipfw_mod.ko module to '
'your kernel (sudo insmod /path/to/ipfw_mod.ko)')
- self._run_shell_command(cmd, error_message)
+ _run_shell_command(cmd, error_message)
return self._pipe_counter
- def _run_shell_command(self, command, msg=None):
- """Executes a command.
- Args:
- command: Command list to execute.
- msg: Message describing the error in case the command fails.
+def _run_shell_command(command, msg=None):
+ """Executes a command.
- Returns:
- The standard output from running the command.
+ Args:
+ command: Command list to execute.
+ msg: Message describing the error in case the command fails.
- Raises:
- NetworkEmulatorError: If command fails. Message is set by the msg
- parameter.
- """
- cmd_list = [str(x) for x in command]
- cmd = ' '.join(cmd_list)
- logging.debug('Running command: %s', cmd)
+ Returns:
+ The standard output from running the command.
- process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- output, error = process.communicate()
- if process.returncode != 0:
- raise NetworkEmulatorError(msg, cmd, process.returncode, output, error)
- return output.strip()
+ Raises:
+ NetworkEmulatorError: If command fails. Message is set by the msg
+ parameter.
+ """
+ cmd_list = [str(x) for x in command]
+ cmd = ' '.join(cmd_list)
+ logging.debug('Running command: %s', cmd)
+
+ process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ if process.returncode != 0:
+ raise NetworkEmulatorError(msg, cmd, process.returncode, output, error)
+ return output.strip()
diff --git a/tools/python_charts/data/vp8_hw.py b/tools/python_charts/data/vp8_hw.py
index b8c6cc03e..80900d9f7 100644
--- a/tools/python_charts/data/vp8_hw.py
+++ b/tools/python_charts/data/vp8_hw.py
@@ -1,6 +1,7 @@
# Sample output from the video_quality_measurment program, included only for
# reference. Geneate your own by running with the --python flag and then change
# the filenames in main.py
+
test_configuration = [{'name': 'name', 'value': 'VP8 hardware test'},
{'name': 'description', 'value': ''},
{'name': 'test_number', 'value': '0'},
diff --git a/tools/python_charts/data/vp8_sw.py b/tools/python_charts/data/vp8_sw.py
index 0f2913724..c082329d3 100644
--- a/tools/python_charts/data/vp8_sw.py
+++ b/tools/python_charts/data/vp8_sw.py
@@ -1,6 +1,7 @@
# Sample output from the video_quality_measurment program, included only for
# reference. Geneate your own by running with the --python flag and then change
# the filenames in main.py
+
test_configuration = [{'name': 'name', 'value': 'VP8 software test'},
{'name': 'description', 'value': ''},
{'name': 'test_number', 'value': '0'},
diff --git a/tools/python_charts/webrtc/data_helper.py b/tools/python_charts/webrtc/data_helper.py
index fce949f98..80cc78f9e 100644
--- a/tools/python_charts/webrtc/data_helper.py
+++ b/tools/python_charts/webrtc/data_helper.py
@@ -17,17 +17,17 @@ class DataHelper(object):
def __init__(self, data_list, table_description, names_list, messages):
""" Initializes the DataHelper with data.
-
+
Args:
- data_list: List of one or more data lists in the format that the
+ data_list: List of one or more data lists in the format that the
Google Visualization Python API expects (list of dictionaries, one
- per row of data). See the gviz_api.DataTable documentation for more
+ per row of data). See the gviz_api.DataTable documentation for more
info.
table_description: dictionary describing the data types of all
columns in the data lists, as defined in the gviz_api.DataTable
documentation.
names_list: List of strings of what we're going to name the data
- columns after. Usually different runs of data collection.
+ columns after. Usually different runs of data collection.
messages: List of strings we might append error messages to.
"""
self.data_list = data_list
@@ -36,29 +36,29 @@ class DataHelper(object):
self.messages = messages
self.number_of_datasets = len(data_list)
self.number_of_frames = len(data_list[0])
-
+
def CreateData(self, field_name, start_frame=0, end_frame=0):
""" Creates a data structure for a specified data field.
-
- Creates a data structure (data type description dictionary and a list
- of data dictionaries) to be used with the Google Visualization Python
+
+ Creates a data structure (data type description dictionary and a list
+ of data dictionaries) to be used with the Google Visualization Python
API. The frame_number column is always present and one column per data
- set is added and its field name is suffixed by _N where N is the number
+ set is added and its field name is suffixed by _N where N is the number
of the data set (0, 1, 2...)
-
+
Args:
field_name: String name of the field, must be present in the data
structure this DataHelper was created with.
start_frame: Frame number to start at (zero indexed). Default: 0.
- end_frame: Frame number to be the last frame. If zero all frames
+ end_frame: Frame number to be the last frame. If zero all frames
will be included. Default: 0.
-
+
Returns:
A tuple containing:
- a dictionary describing the columns in the data result_data_table below.
- This description uses the name for each data set specified by
- names_list.
-
+ This description uses the name for each data set specified by
+ names_list.
+
Example with two data sets named 'Foreman' and 'Crew':
{
'frame_number': ('number', 'Frame number'),
@@ -66,36 +66,36 @@ class DataHelper(object):
'ssim_1': ('number', 'Crew'),
}
- a list containing dictionaries (one per row) with the frame_number
- column and one column of the specified field_name column per data
- set.
-
+ column and one column of the specified field_name column per data
+ set.
+
Example with two data sets named 'Foreman' and 'Crew':
[
{'frame_number': 0, 'ssim_0': 0.98, 'ssim_1': 0.77 },
{'frame_number': 1, 'ssim_0': 0.81, 'ssim_1': 0.53 },
]
"""
-
+
# Build dictionary that describes the data types
- result_table_description = {'frame_number': ('string', 'Frame number')}
+ result_table_description = {'frame_number': ('string', 'Frame number')}
for dataset_index in range(self.number_of_datasets):
column_name = '%s_%s' % (field_name, dataset_index)
column_type = self.table_description[field_name][0]
column_description = self.names_list[dataset_index]
result_table_description[column_name] = (column_type, column_description)
- # Build data table of all the data
+ # Build data table of all the data
result_data_table = []
- # We're going to have one dictionary per row.
+ # We're going to have one dictionary per row.
# Create that and copy frame_number values from the first data set
for source_row in self.data_list[0]:
row_dict = { 'frame_number': source_row['frame_number'] }
result_data_table.append(row_dict)
-
+
# Pick target field data points from the all data tables
if end_frame == 0: # Default to all frames
end_frame = self.number_of_frames
-
+
for dataset_index in range(self.number_of_datasets):
for row_number in range(start_frame, end_frame):
column_name = '%s_%s' % (field_name, dataset_index)
@@ -105,14 +105,14 @@ class DataHelper(object):
self.data_list[dataset_index][row_number][field_name]
except IndexError:
self.messages.append("Couldn't find frame data for row %d "
- "for %s" % (row_number, self.names_list[dataset_index]))
+ "for %s" % (row_number, self.names_list[dataset_index]))
break
return result_table_description, result_data_table
- def GetOrdering(self, table_description):
+ def GetOrdering(self, table_description): # pylint: disable=R0201
""" Creates a list of column names, ordered alphabetically except for the
frame_number column which always will be the first column.
-
+
Args:
table_description: A dictionary of column definitions as defined by the
gviz_api.DataTable documentation.
@@ -121,9 +121,9 @@ class DataHelper(object):
remaining columns are sorted alphabetically.
"""
# The JSON data representation generated from gviz_api.DataTable.ToJSon()
- # must have frame_number as its first column in order for the chart to
+ # must have frame_number as its first column in order for the chart to
# use it as it's X-axis value series.
- # gviz_api.DataTable orders the columns by name by default, which will
+ # gviz_api.DataTable orders the columns by name by default, which will
# be incorrect if we have column names that are sorted before frame_number
# in our data table.
columns_ordering = ['frame_number']
@@ -132,8 +132,8 @@ class DataHelper(object):
if column != 'frame_number':
columns_ordering.append(column)
return columns_ordering
-
- def CreateConfigurationTable(self, configurations):
+
+ def CreateConfigurationTable(self, configurations): # pylint: disable=R0201
""" Combines multiple test data configurations for display.
Args:
@@ -175,9 +175,9 @@ class DataHelper(object):
for configuration in configurations:
data = {}
result_data.append(data)
- for dict in configuration:
- name = dict['name']
- value = dict['value']
+ for values in configuration:
+ name = values['name']
+ value = values['value']
result_description[name] = 'string'
data[name] = value
return result_description, result_data
diff --git a/tools/python_charts/webrtc/data_helper_test.py b/tools/python_charts/webrtc/data_helper_test.py
index 6282f7b05..53b31bdee 100644
--- a/tools/python_charts/webrtc/data_helper_test.py
+++ b/tools/python_charts/webrtc/data_helper_test.py
@@ -7,8 +7,6 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
-__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
-
import unittest
import webrtc.data_helper
@@ -16,13 +14,13 @@ class Test(unittest.TestCase):
def setUp(self):
# Simulate frame data from two different test runs, with 2 frames each.
- self.frame_data_0 = [{'frame_number': 0, 'ssim': 0.5, 'psnr': 30.5},
+ self.frame_data_0 = [{'frame_number': 0, 'ssim': 0.5, 'psnr': 30.5},
{'frame_number': 1, 'ssim': 0.55, 'psnr': 30.55}]
self.frame_data_1 = [{'frame_number': 0, 'ssim': 0.6, 'psnr': 30.6},
{'frame_number': 0, 'ssim': 0.66, 'psnr': 30.66}]
self.all_data = [ self.frame_data_0, self.frame_data_1 ]
-
- # Test with frame_number column in a non-first position sice we need to
+
+    # Test with frame_number column in a non-first position since we need to
# support reordering that to be able to use the gviz_api as we want.
self.type_description = {
'ssim': ('number', 'SSIM'),
@@ -66,13 +64,13 @@ class Test(unittest.TestCase):
self.assertEquals(1, row['frame_number'])
self.assertEquals(0.55, row['ssim_0'])
self.assertEquals(0.66, row['ssim_1'])
-
- description, data_table = helper.CreateData('psnr')
+
+ description, data_table = helper.CreateData('psnr')
self.assertEqual(3, len(description))
self.assertTrue('frame_number' in description)
self.assertTrue('psnr_0' in description)
- self.assertTrue('psnr_1' in description)
- self.assertEqual(0, len(messages))
+ self.assertTrue('psnr_1' in description)
+ self.assertEqual(0, len(messages))
self.assertEquals(2, len(data_table))
row = data_table[0]
@@ -83,21 +81,21 @@ class Test(unittest.TestCase):
self.assertEquals(1, row['frame_number'])
self.assertEquals(30.55, row['psnr_0'])
self.assertEquals(30.66, row['psnr_1'])
-
+
def testGetOrdering(self):
- """ Tests that the ordering help method returns a list with frame_number
+    """ Tests that the ordering helper method returns a list with frame_number
first and the rest sorted alphabetically """
messages = []
helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
self.names, messages)
- description, data_table = helper.CreateData('ssim')
+ description, _ = helper.CreateData('ssim')
columns = helper.GetOrdering(description)
self.assertEqual(3, len(columns))
self.assertEqual(0, len(messages))
self.assertEqual('frame_number', columns[0])
self.assertEqual('ssim_0', columns[1])
self.assertEqual('ssim_1', columns[2])
-
+
def testCreateConfigurationTable(self):
messages = []
helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
@@ -110,6 +108,6 @@ class Test(unittest.TestCase):
self.assertTrue(description.has_key('input_filename'))
self.assertEquals('Test 0', data[0]['name'])
self.assertEquals('Test 1', data[1]['name'])
-
+
if __name__ == "__main__":
unittest.main()
diff --git a/tools/python_charts/webrtc/main.py b/tools/python_charts/webrtc/main.py
index 82d8831b3..c2e2b822b 100644
--- a/tools/python_charts/webrtc/main.py
+++ b/tools/python_charts/webrtc/main.py
@@ -7,30 +7,28 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
-__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
-
import os
import gviz_api
import webrtc.data_helper
def main():
"""
- This Python script displays a web page with test created with the
+ This Python script displays a web page with test created with the
video_quality_measurement program, which is a tool in WebRTC.
-
+
The script requires on two external files and one Python library:
- - A HTML template file with layout and references to the json variables
+ - A HTML template file with layout and references to the json variables
defined in this script
- A data file in Python format, containing the following:
- test_configuration - a dictionary of test configuration names and values.
- - frame_data_types - a dictionary that maps the different metrics to their
+ - frame_data_types - a dictionary that maps the different metrics to their
data types.
- - frame_data - a list of dictionaries where each dictionary maps a metric to
- it's value.
+ - frame_data - a list of dictionaries where each dictionary maps a metric to
+    its value.
- The gviz_api.py of the Google Visualization Python API, available at
http://code.google.com/p/google-visualization-python/
-
- The HTML file is shipped with the script, while the data file must be
+
+ The HTML file is shipped with the script, while the data file must be
generated by running video_quality_measurement with the --python flag
specified.
"""
@@ -48,78 +46,78 @@ def main():
page_template = f.read()
f.close()
except IOError as e:
- ShowErrorPage('Cannot open page template file: %s
Details: %s' %
+ ShowErrorPage('Cannot open page template file: %s
Details: %s' %
(page_template_filename, e))
return
-
+
# Read data from external Python script files. First check that they exist.
for filename in data_filenames:
if not os.path.exists(filename):
messages.append('Cannot open data file: %s' % filename)
data_filenames.remove(filename)
-
+
# Read data from all existing input files.
data_list = []
test_configurations = []
names = []
-
+
for filename in data_filenames:
read_vars = {} # empty dictionary to load the data into.
execfile(filename, read_vars, read_vars)
-
+
test_configuration = read_vars['test_configuration']
table_description = read_vars['frame_data_types']
table_data = read_vars['frame_data']
-
+
# Verify the data in the file loaded properly.
if not table_description or not table_data:
messages.append('Invalid input file: %s. Missing description list or '
'data dictionary variables.' % filename)
continue
-
+
# Frame numbers appear as number type in the data, but Chart API requires
# values of the X-axis to be of string type.
- # Change the frame_number column data type:
+ # Change the frame_number column data type:
table_description['frame_number'] = ('string', 'Frame number')
- # Convert all the values to string types:
+ # Convert all the values to string types:
for row in table_data:
row['frame_number'] = str(row['frame_number'])
-
+
# Store the unique data from this file in the high level lists.
test_configurations.append(test_configuration)
data_list.append(table_data)
# Name of the test run must be present.
test_name = FindConfiguration(test_configuration, 'name')
if not test_name:
- messages.append('Invalid input file: %s. Missing configuration key '
+ messages.append('Invalid input file: %s. Missing configuration key '
'"name"', filename)
continue
names.append(test_name)
-
+
# Create data helper and build data tables for each graph.
- helper = webrtc.data_helper.DataHelper(data_list, table_description,
+ helper = webrtc.data_helper.DataHelper(data_list, table_description,
names, messages)
-
+
# Loading it into gviz_api.DataTable objects and create JSON strings.
description, data = helper.CreateConfigurationTable(test_configurations)
configurations = gviz_api.DataTable(description, data)
json_configurations = configurations.ToJSon()
-
+
description, data = helper.CreateData('ssim')
ssim = gviz_api.DataTable(description, data)
json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))
-
+
description, data = helper.CreateData('psnr')
psnr = gviz_api.DataTable(description, data)
json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))
-
+
description, data = helper.CreateData('packets_dropped')
packet_loss = gviz_api.DataTable(description, data)
- json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
-
+ json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
+
description, data = helper.CreateData('bit_rate')
# Add a column of data points for the desired bit rate to be plotted.
- # (uses test configuration from the last data set, assuming it is the same
+ # (uses test configuration from the last data set, assuming it is the same
# for all of them)
desired_bit_rate = FindConfiguration(test_configuration, 'bit_rate_in_kbps')
if not desired_bit_rate:
@@ -135,12 +133,12 @@ def main():
# Format the messages list with newlines.
messages = '\n'.join(messages)
-
+
# Put the variables as JSon strings into the template.
print page_template % vars()
def FindConfiguration(configuration, name):
- """ Finds a configuration value using it's name.
+  """ Finds a configuration value using its name.
Returns the first configuration with a matching name. Returns None if no
matching configuration is found. """
return_value = None
@@ -152,6 +150,6 @@ def FindConfiguration(configuration, name):
def ShowErrorPage(error_message):
print '