Format testing/tools with git cl format --python.
Change-Id: I27850a5ea7a5b3d0fc3db4012ae9deb584a0f324
Reviewed-on: https://pdfium-review.googlesource.com/c/pdfium/+/62530
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
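
A formatting pass like this can generally be reproduced from the pdfium source
root with depot_tools on PATH. A plausible invocation is sketched below; the
--full flag (reformat whole files rather than only the lines touched by the
current change) and passing the directory explicitly are assumptions, not
details recorded in this change:

  # Reformat the Python files under testing/tools with yapf via depot_tools.
  git cl format --python --full testing/tools

  # Check-only mode: --dry-run exits non-zero if any file would be reformatted.
  git cl format --python --full --dry-run testing/tools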
diff --git a/testing/tools/api_check.py b/testing/tools/api_check.py
index da2869f..0411432 100755
--- a/testing/tools/api_check.py
+++ b/testing/tools/api_check.py
@@ -2,7 +2,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Verifies exported functions in public/*.h are in fpdf_view_c_api_test.c.
This script gathers a list of functions from public/*.h that contain
@@ -17,6 +16,7 @@
import re
import sys
+
def _IsValidFunctionName(function, filename):
if function.startswith('FPDF'):
return True
@@ -70,14 +70,16 @@
functions.extend(_GetExportsFromHeader(public_path, filename))
return functions
+
def _CheckSorted(functions, api_test_path):
unsorted_functions = set()
for i in range(len(functions) - 1):
- if functions[i] > functions[i+1]:
+ if functions[i] > functions[i + 1]:
unsorted_functions.add(functions[i])
- unsorted_functions.add(functions[i+1])
+ unsorted_functions.add(functions[i + 1])
return unsorted_functions
+
def _GetFunctionsFromTest(api_test_path):
chk_regex = re.compile('^ CHK\((.*)\);\n$')
file_regex = re.compile('^ //.*\.h\n$')
@@ -122,21 +124,24 @@
unsorted_functions = set()
for functions in test_functions_per_section:
unsorted_functions |= _CheckSorted(functions, api_test_path)
- check = _CheckAndPrintFailures(unsorted_functions,
+ check = _CheckAndPrintFailures(
+ unsorted_functions,
'Found CHKs that are not in alphabetical order within each section in %s'
% api_test_path)
result = result and check
duplicate_public_functions = _FindDuplicates(public_functions)
check = _CheckAndPrintFailures(duplicate_public_functions,
- 'Found duplicate functions in public headers')
+ 'Found duplicate functions in public headers')
result = result and check
- test_functions = [function for functions in test_functions_per_section
- for function in functions]
+ test_functions = [
+ function for functions in test_functions_per_section
+ for function in functions
+ ]
duplicate_test_functions = _FindDuplicates(test_functions)
check = _CheckAndPrintFailures(duplicate_test_functions,
- 'Found duplicate functions in API test')
+ 'Found duplicate functions in API test')
result = result and check
public_functions_set = set(public_functions)
@@ -149,9 +154,8 @@
result = result and check
if not result:
- print ('Some checks failed. Make sure %s is in sync with the public API '
- 'headers.'
- % api_test_relative_path)
+ print('Some checks failed. Make sure %s is in sync with the public API '
+ 'headers.' % api_test_relative_path)
return 1
return 0
diff --git a/testing/tools/common.py b/testing/tools/common.py
index da3cac8..108fcfd 100755
--- a/testing/tools/common.py
+++ b/testing/tools/common.py
@@ -10,6 +10,7 @@
import subprocess
import sys
+
def os_name():
if sys.platform.startswith('linux'):
return 'linux'
@@ -28,7 +29,8 @@
return e
-def RunCommandPropagateErr(cmd, stdout_has_errors=False,
+def RunCommandPropagateErr(cmd,
+ stdout_has_errors=False,
exit_status_on_error=None):
"""Run a command as a subprocess.
@@ -72,7 +74,7 @@
for line in output.split('\n'):
line = line.strip()
if line.startswith("MD5:"):
- ret.append([x.strip() for x in line.lstrip("MD5:").rsplit(":", 1)])
+ ret.append([x.strip() for x in line.lstrip("MD5:").rsplit(":", 1)])
return None, ret
except subprocess.CalledProcessError as e:
return e, None
@@ -141,14 +143,14 @@
os.chdir(cwd)
arg_match_output = re.search('%s = (.*)' % arg_name, gn_args_output).group(1)
if verbose:
- print >> sys.stderr, "Found '%s' for value of %s" % (arg_match_output, arg_name)
+ print >> sys.stderr, "Found '%s' for value of %s" % (arg_match_output,
+ arg_name)
return arg_match_output == 'true'
def PrintWithTime(s):
"""Prints s prepended by a timestamp."""
- print '[%s] %s' % (datetime.datetime.now().strftime("%Y%m%d %H:%M:%S"),
- s)
+ print '[%s] %s' % (datetime.datetime.now().strftime("%Y%m%d %H:%M:%S"), s)
def PrintErr(s):
diff --git a/testing/tools/fixup_pdf_template.py b/testing/tools/fixup_pdf_template.py
index 0a6191f..ee47c4b 100755
--- a/testing/tools/fixup_pdf_template.py
+++ b/testing/tools/fixup_pdf_template.py
@@ -2,7 +2,6 @@
# Copyright 2014 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Expands a hand-written PDF testcase (template) into a valid PDF file.
There are several places in a PDF file where byte-offsets are required. This
@@ -140,7 +139,7 @@
try:
with open(input_path, 'rb') as infile:
for line in infile:
- match = re.match(r'\s*\{\{include\s+(.+)\}\}', line);
+ match = re.match(r'\s*\{\{include\s+(.+)\}\}', line)
if match:
insert_includes(
os.path.join(os.path.dirname(input_path), match.group(1)),
diff --git a/testing/tools/githelper.py b/testing/tools/githelper.py
index 2e94196..61d5954 100644
--- a/testing/tools/githelper.py
+++ b/testing/tools/githelper.py
@@ -1,7 +1,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Classes for dealing with git."""
import subprocess
@@ -42,13 +41,12 @@
def GetCurrentBranchName(self):
"""Returns a string with the current branch name."""
- return RunCommandPropagateErr(
- ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
- exit_status_on_error=1).strip()
+ return RunCommandPropagateErr(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+ exit_status_on_error=1).strip()
def GetCurrentBranchHash(self):
- return RunCommandPropagateErr(
- ['git', 'rev-parse', 'HEAD'], exit_status_on_error=1).strip()
+ return RunCommandPropagateErr(['git', 'rev-parse', 'HEAD'],
+ exit_status_on_error=1).strip()
def IsCurrentBranchClean(self):
output = RunCommandPropagateErr(['git', 'status', '--porcelain'],
@@ -57,8 +55,8 @@
def BranchExists(self, branch_name):
"""Return whether a branch with the given name exists."""
- output = RunCommandPropagateErr(['git', 'rev-parse', '--verify',
- branch_name])
+ output = RunCommandPropagateErr(
+ ['git', 'rev-parse', '--verify', branch_name])
return output is not None
def CloneLocal(self, source_repo, new_repo):
diff --git a/testing/tools/gold.py b/testing/tools/gold.py
index 2987508..3e552a3 100644
--- a/testing/tools/gold.py
+++ b/testing/tools/gold.py
@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
import json
import os
import shlex
@@ -18,7 +17,7 @@
kv_pairs = shlex.split(kv_str)
if len(kv_pairs) % 2:
raise ValueError('Uneven number of key/value pairs. Got %s' % kv_str)
- return { kv_pairs[i]:kv_pairs[i + 1] for i in xrange(0, len(kv_pairs), 2) }
+ return {kv_pairs[i]: kv_pairs[i + 1] for i in xrange(0, len(kv_pairs), 2)}
# This module downloads a json provided by Skia Gold with the expected baselines
@@ -81,15 +80,15 @@
c_type = response.headers.get('Content-type', '')
EXPECTED_CONTENT_TYPE = 'application/json'
if c_type != EXPECTED_CONTENT_TYPE:
- raise ValueError('Invalid content type. Got %s instead of %s' % (
- c_type, EXPECTED_CONTENT_TYPE))
+ raise ValueError('Invalid content type. Got %s instead of %s' %
+ (c_type, EXPECTED_CONTENT_TYPE))
json_data = response.read()
break # If this line is reached, then no exception occurred.
except (ssl.SSLError, urllib2.HTTPError, urllib2.URLError) as e:
timeout *= 2
if timeout < MAX_TIMEOUT:
continue
- print ('Error: Unable to read skia gold json from %s: %s' % (url, e))
+ print('Error: Unable to read skia gold json from %s: %s' % (url, e))
return None
try:
@@ -130,8 +129,8 @@
if md5_hash in self._baselines[test_name]:
return GoldBaseline.MATCH
- return (GoldBaseline.MISMATCH if found_test_case
- else GoldBaseline.NO_BASELINE)
+ return (GoldBaseline.MISMATCH
+ if found_test_case else GoldBaseline.NO_BASELINE)
# This module collects and writes output in a format expected by the
@@ -177,6 +176,7 @@
# }
#
class GoldResults(object):
+
def __init__(self, source_type, output_dir, properties_str, key_str,
ignore_hashes_file):
"""
@@ -194,7 +194,7 @@
self._source_type = source_type
self._properties = _ParseKeyValuePairs(properties_str)
self._properties['key'] = _ParseKeyValuePairs(key_str)
- self._results = []
+ self._results = []
self._passfail = []
self._output_dir = output_dir
@@ -206,7 +206,7 @@
self._ignore_hashes = set()
if ignore_hashes_file:
with open(ignore_hashes_file, 'r') as ig_file:
- hashes=[x.strip() for x in ig_file.readlines() if x.strip()]
+ hashes = [x.strip() for x in ig_file.readlines() if x.strip()]
self._ignore_hashes = set(hashes)
def AddTestResult(self, testName, md5Hash, outputImagePath, matchResult):
@@ -222,23 +222,21 @@
# Add an entry to the list of test results
self._results.append({
- 'key': {
- 'name': testName,
- 'source_type': self._source_type,
- },
- 'md5': md5Hash,
- 'options': {
- 'ext': imgExt,
- 'gamma_correct': 'no'
- }
+ 'key': {
+ 'name': testName,
+ 'source_type': self._source_type,
+ },
+ 'md5': md5Hash,
+ 'options': {
+ 'ext': imgExt,
+ 'gamma_correct': 'no'
+ }
})
self._passfail.append((testName, matchResult))
def WriteResults(self):
- self._properties.update({
- 'results': self._results
- })
+ self._properties.update({'results': self._results})
output_file_name = os.path.join(self._output_dir, 'dm.json')
with open(output_file_name, 'wb') as outfile:
@@ -250,6 +248,7 @@
json.dump(self._passfail, outfile, indent=1)
outfile.write('\n')
+
# Produce example output for manual testing.
if __name__ == '__main__':
# Create a test directory with three empty 'image' files.
diff --git a/testing/tools/pngdiffer.py b/testing/tools/pngdiffer.py
index baed33b..f5b762c 100755
--- a/testing/tools/pngdiffer.py
+++ b/testing/tools/pngdiffer.py
@@ -11,14 +11,15 @@
import common
+
class PNGDiffer():
+
def __init__(self, finder):
self.pdfium_diff_path = finder.ExecutablePath('pdfium_diff')
self.os_name = finder.os_name
def CheckMissingTools(self, regenerate_expected):
- if (regenerate_expected and
- self.os_name == 'linux' and
+ if (regenerate_expected and self.os_name == 'linux' and
not distutils.spawn.find_executable('optipng')):
return 'Please install "optipng" to regenerate expected images.'
return None
@@ -55,8 +56,8 @@
if page == 0:
print "WARNING: no expected results files for " + input_filename
if os.path.exists(actual_path):
- print ('FAILURE: Missing expected result for 0-based page %d of %s'
- % (page, input_filename))
+ print('FAILURE: Missing expected result for 0-based page %d of %s' %
+ (page, input_filename))
return True
break
print "Checking " + actual_path
@@ -65,7 +66,7 @@
error = common.RunCommand(
[self.pdfium_diff_path, expected_path, actual_path])
else:
- error = 1;
+ error = 1
if error:
# When failed, we check against platform based results.
if os.path.exists(platform_expected_path):
@@ -115,8 +116,8 @@
input_root, _ = os.path.splitext(input_filename)
self.actual_path_template = os.path.join(working_dir,
input_root + ACTUAL_TEMPLATE)
- self.expected_path = os.path.join(
- source_dir, input_root + EXPECTED_TEMPLATE)
+ self.expected_path = os.path.join(source_dir,
+ input_root + EXPECTED_TEMPLATE)
self.platform_expected_path = os.path.join(
source_dir, input_root + PLATFORM_EXPECTED_TEMPLATE)
diff --git a/testing/tools/run_corpus_tests.py b/testing/tools/run_corpus_tests.py
index 1175de4..5c1a207 100755
--- a/testing/tools/run_corpus_tests.py
+++ b/testing/tools/run_corpus_tests.py
@@ -7,12 +7,13 @@
import test_runner
+
def main():
runner = test_runner.TestRunner('corpus')
runner.SetEnforceExpectedImages(True)
runner.SetOneShotRenderer(True)
return runner.Run()
+
if __name__ == '__main__':
sys.exit(main())
-
diff --git a/testing/tools/run_javascript_tests.py b/testing/tools/run_javascript_tests.py
index 76d2379..cb6e69a 100755
--- a/testing/tools/run_javascript_tests.py
+++ b/testing/tools/run_javascript_tests.py
@@ -7,9 +7,11 @@
import test_runner
+
def main():
runner = test_runner.TestRunner('javascript')
return runner.Run()
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/testing/tools/run_pixel_tests.py b/testing/tools/run_pixel_tests.py
index 1d61966..92523d1 100755
--- a/testing/tools/run_pixel_tests.py
+++ b/testing/tools/run_pixel_tests.py
@@ -7,10 +7,12 @@
import test_runner
+
def main():
runner = test_runner.TestRunner('pixel')
runner.SetEnforceExpectedImages(True)
return runner.Run()
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/testing/tools/safetynet_compare.py b/testing/tools/safetynet_compare.py
index 3c0de03..00c67a8 100755
--- a/testing/tools/safetynet_compare.py
+++ b/testing/tools/safetynet_compare.py
@@ -2,7 +2,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Compares the performance of two versions of the pdfium code."""
import argparse
@@ -48,8 +47,7 @@
self.safe_script_dir = os.path.join('testing', 'tools')
self.safe_measure_script_path = os.path.abspath(
- os.path.join(self.safe_script_dir,
- 'safetynet_measure.py'))
+ os.path.join(self.safe_script_dir, 'safetynet_measure.py'))
input_file_re = re.compile('^.+[.]pdf$')
self.test_cases = []
@@ -82,12 +80,10 @@
if self.args.branch_after:
if self.args.this_repo:
before, after = self._ProfileTwoOtherBranchesInThisRepo(
- self.args.branch_before,
- self.args.branch_after)
+ self.args.branch_before, self.args.branch_after)
else:
- before, after = self._ProfileTwoOtherBranches(
- self.args.branch_before,
- self.args.branch_after)
+ before, after = self._ProfileTwoOtherBranches(self.args.branch_before,
+ self.args.branch_after)
elif self.args.branch_before:
if self.args.this_repo:
before, after = self._ProfileCurrentAndOtherBranchInThisRepo(
@@ -111,11 +107,8 @@
if self.args.png_dir:
image_comparison = ImageComparison(
- self.after_build_dir,
- self.args.png_dir,
- ('before', 'after'),
- self.args.num_workers,
- self.args.png_threshold)
+ self.after_build_dir, self.args.png_dir, ('before', 'after'),
+ self.args.num_workers, self.args.png_threshold)
image_comparison.Run(open_in_browser=not self.args.machine_readable)
return 0
@@ -186,11 +179,9 @@
mapping a test case name to the profiling values for that test case
in the given branch.
"""
- after = self._ProfileSeparateRepo('after',
- self.after_build_dir,
+ after = self._ProfileSeparateRepo('after', self.after_build_dir,
after_branch)
- before = self._ProfileSeparateRepo('before',
- self.before_build_dir,
+ before = self._ProfileSeparateRepo('before', self.before_build_dir,
before_branch)
return before, after
@@ -248,8 +239,7 @@
self._BuildCurrentBranch(self.after_build_dir)
after = self._MeasureCurrentBranch('after', self.after_build_dir)
- before = self._ProfileSeparateRepo('before',
- self.before_build_dir,
+ before = self._ProfileSeparateRepo('before', self.before_build_dir,
other_branch)
return before, after
@@ -312,8 +302,7 @@
A dict mapping each test case name to the profiling values for that
test case.
"""
- build_dir = self._CreateTempRepo('repo_%s' % run_label,
- relative_build_dir,
+ build_dir = self._CreateTempRepo('repo_%s' % run_label, relative_build_dir,
branch)
self._BuildCurrentBranch(build_dir)
@@ -345,8 +334,10 @@
os.chdir(repo_dir)
PrintErr('Syncing...')
- cmd = ['gclient', 'config', '--unmanaged',
- 'https://pdfium.googlesource.com/pdfium.git']
+ cmd = [
+ 'gclient', 'config', '--unmanaged',
+ 'https://pdfium.googlesource.com/pdfium.git'
+ ]
if self.args.cache_dir:
cmd.append('--cache-dir=%s' % self.args.cache_dir)
RunCommandPropagateErr(cmd, exit_status_on_error=1)
@@ -371,7 +362,6 @@
return build_dir
-
def _CheckoutBranch(self, branch):
PrintErr("Checking out branch '%s'" % branch)
self.git.Checkout(branch)
@@ -449,15 +439,15 @@
"""
results = {}
pool = multiprocessing.Pool(self.args.num_workers)
- worker_func = functools.partial(
- RunSingleTestCaseParallel, self, run_label, build_dir)
+ worker_func = functools.partial(RunSingleTestCaseParallel, self, run_label,
+ build_dir)
try:
# The timeout is a workaround for http://bugs.python.org/issue8296
# which prevents KeyboardInterrupt from working.
one_year_in_seconds = 3600 * 24 * 365
- worker_results = (pool.map_async(worker_func, self.test_cases)
- .get(one_year_in_seconds))
+ worker_results = (
+ pool.map_async(worker_func, self.test_cases).get(one_year_in_seconds))
for worker_result in worker_results:
test_case, result = worker_result
if result is not None:
@@ -484,8 +474,10 @@
Returns:
The measured profiling value for that test case.
"""
- command = [self.safe_measure_script_path, test_case,
- '--build-dir=%s' % build_dir]
+ command = [
+ self.safe_measure_script_path, test_case,
+ '--build-dir=%s' % build_dir
+ ]
if self.args.interesting_section:
command.append('--interesting-section')
@@ -525,15 +517,14 @@
test_case_dir, test_case_filename = os.path.split(test_case)
test_case_png_matcher = '%s.*.png' % test_case_filename
- for output_png in glob.glob(os.path.join(test_case_dir,
- test_case_png_matcher)):
+ for output_png in glob.glob(
+ os.path.join(test_case_dir, test_case_png_matcher)):
shutil.move(output_png, png_dir)
def _GetProfileFilePath(self, run_label, test_case):
if self.args.output_dir:
- output_filename = ('callgrind.out.%s.%s'
- % (test_case.replace('/', '_'),
- run_label))
+ output_filename = (
+ 'callgrind.out.%s.%s' % (test_case.replace('/', '_'), run_label))
return os.path.join(self.args.output_dir, output_filename)
else:
return None
@@ -616,81 +607,110 @@
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('input_paths', nargs='+',
- help='pdf files or directories to search for pdf files '
- 'to run as test cases')
- parser.add_argument('--branch-before',
- help='git branch to use as "before" for comparison. '
- 'Omitting this will use the current branch '
- 'without uncommitted changes as the baseline.')
- parser.add_argument('--branch-after',
- help='git branch to use as "after" for comparison. '
- 'Omitting this will use the current branch '
- 'with uncommitted changes.')
- parser.add_argument('--build-dir', default=os.path.join('out', 'Release'),
- help='relative path from the base source directory '
- 'to the build directory')
- parser.add_argument('--build-dir-before',
- help='relative path from the base source directory '
- 'to the build directory for the "before" branch, if '
- 'different from the build directory for the '
- '"after" branch')
- parser.add_argument('--cache-dir', default=None,
- help='directory with a new or preexisting cache for '
- 'downloads. Default is to not use a cache.')
- parser.add_argument('--this-repo', action='store_true',
- help='use the repository where the script is instead of '
- 'checking out a temporary one. This is faster and '
- 'does not require downloads, but although it '
- 'restores the state of the local repo, if the '
- 'script is killed or crashes the changes can remain '
- 'stashed and you may be on another branch.')
- parser.add_argument('--profiler', default='callgrind',
- help='which profiler to use. Supports callgrind, '
- 'perfstat, and none. Default is callgrind.')
- parser.add_argument('--interesting-section', action='store_true',
- help='whether to measure just the interesting section or '
- 'the whole test harness. Limiting to only the '
- 'interesting section does not work on Release since '
- 'the delimiters are optimized out')
- parser.add_argument('--pages',
- help='selects some pages to be rendered. Page numbers '
- 'are 0-based. "--pages A" will render only page A. '
- '"--pages A-B" will render pages A to B '
- '(inclusive).')
- parser.add_argument('--num-workers', default=multiprocessing.cpu_count(),
- type=int, help='run NUM_WORKERS jobs in parallel')
- parser.add_argument('--output-dir',
- help='directory to write the profile data output files')
- parser.add_argument('--png-dir', default=None,
- help='outputs pngs to the specified directory that can '
- 'be compared with a static html generated. Will '
- 'affect performance measurements.')
- parser.add_argument('--png-threshold', default=0.0, type=float,
- help='Requires --png-dir. Threshold above which a png '
- 'is considered to have changed.')
- parser.add_argument('--threshold-significant', default=0.02, type=float,
- help='variations in performance above this factor are '
- 'considered significant')
- parser.add_argument('--machine-readable', action='store_true',
- help='whether to get output for machines. If enabled the '
- 'output will be a json with the format specified in '
- 'ComparisonConclusions.GetOutputDict(). Default is '
- 'human-readable.')
- parser.add_argument('--case-order', default=None,
- help='what key to use when sorting test cases in the '
- 'output. Accepted values are "after", "before", '
- '"ratio" and "rating". Default is sorting by test '
- 'case path.')
+ parser.add_argument(
+ 'input_paths',
+ nargs='+',
+ help='pdf files or directories to search for pdf files '
+ 'to run as test cases')
+ parser.add_argument(
+ '--branch-before',
+ help='git branch to use as "before" for comparison. '
+ 'Omitting this will use the current branch '
+ 'without uncommitted changes as the baseline.')
+ parser.add_argument(
+ '--branch-after',
+ help='git branch to use as "after" for comparison. '
+ 'Omitting this will use the current branch '
+ 'with uncommitted changes.')
+ parser.add_argument(
+ '--build-dir',
+ default=os.path.join('out', 'Release'),
+ help='relative path from the base source directory '
+ 'to the build directory')
+ parser.add_argument(
+ '--build-dir-before',
+ help='relative path from the base source directory '
+ 'to the build directory for the "before" branch, if '
+ 'different from the build directory for the '
+ '"after" branch')
+ parser.add_argument(
+ '--cache-dir',
+ default=None,
+ help='directory with a new or preexisting cache for '
+ 'downloads. Default is to not use a cache.')
+ parser.add_argument(
+ '--this-repo',
+ action='store_true',
+ help='use the repository where the script is instead of '
+ 'checking out a temporary one. This is faster and '
+ 'does not require downloads, but although it '
+ 'restores the state of the local repo, if the '
+ 'script is killed or crashes the changes can remain '
+ 'stashed and you may be on another branch.')
+ parser.add_argument(
+ '--profiler',
+ default='callgrind',
+ help='which profiler to use. Supports callgrind, '
+ 'perfstat, and none. Default is callgrind.')
+ parser.add_argument(
+ '--interesting-section',
+ action='store_true',
+ help='whether to measure just the interesting section or '
+ 'the whole test harness. Limiting to only the '
+ 'interesting section does not work on Release since '
+ 'the delimiters are optimized out')
+ parser.add_argument(
+ '--pages',
+ help='selects some pages to be rendered. Page numbers '
+ 'are 0-based. "--pages A" will render only page A. '
+ '"--pages A-B" will render pages A to B '
+ '(inclusive).')
+ parser.add_argument(
+ '--num-workers',
+ default=multiprocessing.cpu_count(),
+ type=int,
+ help='run NUM_WORKERS jobs in parallel')
+ parser.add_argument(
+ '--output-dir', help='directory to write the profile data output files')
+ parser.add_argument(
+ '--png-dir',
+ default=None,
+ help='outputs pngs to the specified directory that can '
+ 'be compared with a static html generated. Will '
+ 'affect performance measurements.')
+ parser.add_argument(
+ '--png-threshold',
+ default=0.0,
+ type=float,
+ help='Requires --png-dir. Threshold above which a png '
+ 'is considered to have changed.')
+ parser.add_argument(
+ '--threshold-significant',
+ default=0.02,
+ type=float,
+ help='variations in performance above this factor are '
+ 'considered significant')
+ parser.add_argument(
+ '--machine-readable',
+ action='store_true',
+ help='whether to get output for machines. If enabled the '
+ 'output will be a json with the format specified in '
+ 'ComparisonConclusions.GetOutputDict(). Default is '
+ 'human-readable.')
+ parser.add_argument(
+ '--case-order',
+ default=None,
+ help='what key to use when sorting test cases in the '
+ 'output. Accepted values are "after", "before", '
+ '"ratio" and "rating". Default is sorting by test '
+ 'case path.')
args = parser.parse_args()
# Always start at the pdfium src dir, which is assumed to be two level above
# this script.
pdfium_src_dir = os.path.join(
- os.path.dirname(__file__),
- os.path.pardir,
- os.path.pardir)
+ os.path.dirname(__file__), os.path.pardir, os.path.pardir)
os.chdir(pdfium_src_dir)
git = GitHelper()
diff --git a/testing/tools/safetynet_conclusions.py b/testing/tools/safetynet_conclusions.py
index fdbc10d..8f0b28c 100644
--- a/testing/tools/safetynet_conclusions.py
+++ b/testing/tools/safetynet_conclusions.py
@@ -1,12 +1,10 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Classes that draw conclusions out of a comparison and represent them."""
from collections import Counter
-
FORMAT_RED = '\033[01;31m{0}\033[00m'
FORMAT_GREEN = '\033[01;32m{0}\033[00m'
FORMAT_MAGENTA = '\033[01;35m{0}\033[00m'
@@ -20,10 +18,7 @@
RATING_SMALL_CHANGE = 'small_change'
RATINGS = [
- RATING_FAILURE,
- RATING_REGRESSION,
- RATING_IMPROVEMENT,
- RATING_NO_CHANGE,
+ RATING_FAILURE, RATING_REGRESSION, RATING_IMPROVEMENT, RATING_NO_CHANGE,
RATING_SMALL_CHANGE
]
@@ -233,10 +228,12 @@
def GetOutputDict(self):
"""Returns a dict with the test case's conclusions."""
- return {'before': self.before,
- 'after': self.after,
- 'ratio': self.ratio,
- 'rating': self.rating}
+ return {
+ 'before': self.before,
+ 'after': self.after,
+ 'ratio': self.ratio,
+ 'rating': self.rating
+ }
def PrintConclusionsDictHumanReadable(conclusions_dict, colored, key=None):
@@ -249,18 +246,16 @@
"""
# Print header
print '=' * 80
- print '{0:>11s} {1:>15s} {2}' .format(
- '% Change',
- 'Time after',
- 'Test case')
+ print '{0:>11s} {1:>15s} {2}'.format('% Change', 'Time after', 'Test case')
print '-' * 80
color = FORMAT_NORMAL
# Print cases
if key is not None:
- case_pairs = sorted(conclusions_dict['comparison_by_case'].iteritems(),
- key=lambda kv: kv[1][key])
+ case_pairs = sorted(
+ conclusions_dict['comparison_by_case'].iteritems(),
+ key=lambda kv: kv[1][key])
else:
case_pairs = sorted(conclusions_dict['comparison_by_case'].iteritems())
@@ -270,14 +265,12 @@
if case_dict['rating'] == RATING_FAILURE:
print u'{} to measure time for {}'.format(
- color.format('Failed'),
- case_name).encode('utf-8')
+ color.format('Failed'), case_name).encode('utf-8')
continue
- print u'{0} {1:15,d} {2}' .format(
+ print u'{0} {1:15,d} {2}'.format(
color.format('{:+11.4%}'.format(case_dict['ratio'])),
- case_dict['after'],
- case_name).encode('utf-8')
+ case_dict['after'], case_name).encode('utf-8')
# Print totals
totals = conclusions_dict['summary']
@@ -286,15 +279,12 @@
if colored:
color = FORMAT_MAGENTA if totals[RATING_FAILURE] else FORMAT_GREEN
- print ('Failed to measure: %s'
- % color.format(totals[RATING_FAILURE]))
+ print('Failed to measure: %s' % color.format(totals[RATING_FAILURE]))
if colored:
color = FORMAT_RED if totals[RATING_REGRESSION] else FORMAT_GREEN
- print ('Regressions: %s'
- % color.format(totals[RATING_REGRESSION]))
+ print('Regressions: %s' % color.format(totals[RATING_REGRESSION]))
if colored:
color = FORMAT_CYAN if totals[RATING_IMPROVEMENT] else FORMAT_GREEN
- print ('Improvements: %s'
- % color.format(totals[RATING_IMPROVEMENT]))
+ print('Improvements: %s' % color.format(totals[RATING_IMPROVEMENT]))
diff --git a/testing/tools/safetynet_image.py b/testing/tools/safetynet_image.py
index 319eeb9..628ed04 100644
--- a/testing/tools/safetynet_image.py
+++ b/testing/tools/safetynet_image.py
@@ -1,7 +1,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Compares pairs of page images and generates an HTML to look at differences.
"""
@@ -32,8 +31,8 @@
images, so /tmp/images is self-contained and can be moved around or shared.
"""
- def __init__(self, build_dir, output_path, two_labels,
- num_workers, threshold_fraction):
+ def __init__(self, build_dir, output_path, two_labels, num_workers,
+ threshold_fraction):
"""Constructor.
Args:
@@ -72,8 +71,8 @@
if not os.path.exists(self.diff_path):
os.makedirs(self.diff_path)
- self.image_locations = ImageLocations(
- self.output_path, self.diff_path, self.two_labels)
+ self.image_locations = ImageLocations(self.output_path, self.diff_path,
+ self.two_labels)
difference = self._GenerateDiffs()
@@ -116,9 +115,10 @@
# The timeout is a workaround for http://bugs.python.org/issue8296
# which prevents KeyboardInterrupt from working.
one_year_in_seconds = 3600 * 24 * 365
- worker_results = (pool.map_async(worker_func,
- self.image_locations.Images())
- .get(one_year_in_seconds))
+ worker_results = (
+ pool.map_async(
+ worker_func,
+ self.image_locations.Images()).get(one_year_in_seconds))
for worker_result in worker_results:
image, result = worker_result
difference[image] = result
@@ -146,22 +146,23 @@
percentage of pixels changed.
"""
try:
- subprocess.check_output(
- [self.img_diff_bin,
- self.image_locations.Left(image),
- self.image_locations.Right(image)])
+ subprocess.check_output([
+ self.img_diff_bin,
+ self.image_locations.Left(image),
+ self.image_locations.Right(image)
+ ])
except subprocess.CalledProcessError as e:
percentage_change = float(re.findall(r'\d+\.\d+', e.output)[0])
else:
return image, 0
try:
- subprocess.check_output(
- [self.img_diff_bin,
- '--diff',
- self.image_locations.Left(image),
- self.image_locations.Right(image),
- self.image_locations.Diff(image)])
+ subprocess.check_output([
+ self.img_diff_bin, '--diff',
+ self.image_locations.Left(image),
+ self.image_locations.Right(image),
+ self.image_locations.Diff(image)
+ ])
except subprocess.CalledProcessError as e:
return image, percentage_change
else:
@@ -185,12 +186,10 @@
f.write('<tr>')
self._WritePageCompareTd(
- f,
- self._GetRelativePath(self.image_locations.Left(image)),
+ f, self._GetRelativePath(self.image_locations.Left(image)),
self._GetRelativePath(self.image_locations.Right(image)))
- self._WritePageTd(
- f,
- self._GetRelativePath(self.image_locations.Diff(image)))
+ self._WritePageTd(f, self._GetRelativePath(
+ self.image_locations.Diff(image)))
f.write('</tr>')
def _WritePageTd(self, f, image_path):
@@ -215,9 +214,8 @@
f.write('<td>')
f.write('<img src="%s" '
'onmouseover="this.src=\'%s\';" '
- 'onmouseout="this.src=\'%s\';">' % (normal_image_path,
- hover_image_path,
- normal_image_path))
+ 'onmouseout="this.src=\'%s\';">' %
+ (normal_image_path, hover_image_path, normal_image_path))
f.write('</td>')
def _WriteSmallChanges(self, f, small_changes):
@@ -265,8 +263,9 @@
return (pieces[0], int(pieces[1]))
self.images.sort(key=KeyFn)
- self.diff = {image: os.path.join(self.diff_path, image)
- for image in self.images}
+ self.diff = {
+ image: os.path.join(self.diff_path, image) for image in self.images
+ }
def _FindImages(self, label):
"""Traverses a dir and builds a dict of all page images to compare in it.
@@ -280,8 +279,9 @@
image_path_matcher = os.path.join(self.output_path, label, '*.*.png')
image_paths = glob.glob(image_path_matcher)
- image_dict = {os.path.split(image_path)[1]: image_path
- for image_path in image_paths}
+ image_dict = {
+ os.path.split(image_path)[1]: image_path for image_path in image_paths
+ }
return image_dict
diff --git a/testing/tools/safetynet_job.py b/testing/tools/safetynet_job.py
index f0e4efa..dba524a 100755
--- a/testing/tools/safetynet_job.py
+++ b/testing/tools/safetynet_job.py
@@ -2,7 +2,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Looks for performance regressions on all pushes since the last run.
Run this nightly to have a periodical check for performance regressions.
@@ -62,9 +61,7 @@
there was an improvement and no regression.
"""
pdfium_src_dir = os.path.join(
- os.path.dirname(__file__),
- os.path.pardir,
- os.path.pardir)
+ os.path.dirname(__file__), os.path.pardir, os.path.pardir)
os.chdir(pdfium_src_dir)
branch_to_restore = self.git.GetCurrentBranchName()
@@ -114,8 +111,8 @@
"""
current = self.git.GetCurrentBranchHash()
- PrintWithTime('Incremental run, current is %s, last is %s'
- % (current, last_revision_covered))
+ PrintWithTime('Incremental run, current is %s, last is %s' %
+ (current, last_revision_covered))
if not os.path.exists(self.context.run_output_dir):
os.makedirs(self.context.run_output_dir)
@@ -123,18 +120,19 @@
if current == last_revision_covered:
PrintWithTime('No changes seen, finishing job')
output_info = {
- 'metadata': self._BuildRunMetadata(last_revision_covered,
- current,
- False)}
+ 'metadata':
+ self._BuildRunMetadata(last_revision_covered, current, False)
+ }
self._WriteRawJson(output_info)
return 0
# Run compare
- cmd = ['testing/tools/safetynet_compare.py',
- '--this-repo',
- '--machine-readable',
- '--branch-before=%s' % last_revision_covered,
- '--output-dir=%s' % self.context.run_output_dir]
+ cmd = [
+ 'testing/tools/safetynet_compare.py', '--this-repo',
+ '--machine-readable',
+ '--branch-before=%s' % last_revision_covered,
+ '--output-dir=%s' % self.context.run_output_dir
+ ]
cmd.extend(self.args.input_paths)
json_output = RunCommandPropagateErr(cmd)
@@ -144,16 +142,14 @@
output_info = json.loads(json_output)
- run_metadata = self._BuildRunMetadata(last_revision_covered,
- current,
- True)
+ run_metadata = self._BuildRunMetadata(last_revision_covered, current, True)
output_info.setdefault('metadata', {}).update(run_metadata)
self._WriteRawJson(output_info)
- PrintConclusionsDictHumanReadable(output_info,
- colored=(not self.args.output_to_log
- and not self.args.no_color),
- key='after')
+ PrintConclusionsDictHumanReadable(
+ output_info,
+ colored=(not self.args.output_to_log and not self.args.no_color),
+ key='after')
status = 0
@@ -194,22 +190,31 @@
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('results_dir',
- help='where to write the job results')
- parser.add_argument('input_paths', nargs='+',
- help='pdf files or directories to search for pdf files '
- 'to run as test cases')
- parser.add_argument('--no-checkout', action='store_true',
- help='whether to skip checking out origin/master. Use '
- 'for script debugging.')
- parser.add_argument('--no-checkpoint', action='store_true',
- help='whether to skip writing the new checkpoint. Use '
- 'for script debugging.')
- parser.add_argument('--no-color', action='store_true',
- help='whether to write output without color escape '
- 'codes.')
- parser.add_argument('--output-to-log', action='store_true',
- help='whether to write output to a log file')
+ parser.add_argument('results_dir', help='where to write the job results')
+ parser.add_argument(
+ 'input_paths',
+ nargs='+',
+ help='pdf files or directories to search for pdf files '
+ 'to run as test cases')
+ parser.add_argument(
+ '--no-checkout',
+ action='store_true',
+ help='whether to skip checking out origin/master. Use '
+ 'for script debugging.')
+ parser.add_argument(
+ '--no-checkpoint',
+ action='store_true',
+ help='whether to skip writing the new checkpoint. Use '
+ 'for script debugging.')
+ parser.add_argument(
+ '--no-color',
+ action='store_true',
+ help='whether to write output without color escape '
+ 'codes.')
+ parser.add_argument(
+ '--output-to-log',
+ action='store_true',
+ help='whether to write output to a log file')
args = parser.parse_args()
job_context = JobContext(args)
@@ -230,4 +235,3 @@
if __name__ == '__main__':
sys.exit(main())
-
diff --git a/testing/tools/safetynet_measure.py b/testing/tools/safetynet_measure.py
index 55046a2..fcda406 100755
--- a/testing/tools/safetynet_measure.py
+++ b/testing/tools/safetynet_measure.py
@@ -2,7 +2,6 @@
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Measures performance for rendering a single test case with pdfium.
The output is a number that is a metric which depends on the profiler specified.
@@ -16,13 +15,13 @@
from common import PrintErr
-
CALLGRIND_PROFILER = 'callgrind'
PERFSTAT_PROFILER = 'perfstat'
NONE_PROFILER = 'none'
PDFIUM_TEST = 'pdfium_test'
+
class PerformanceRun(object):
"""A single measurement of a test case."""
@@ -33,13 +32,13 @@
def _CheckTools(self):
"""Returns whether the tool file paths are sane."""
if not os.path.exists(self.pdfium_test_path):
- PrintErr("FAILURE: Can't find test executable '%s'"
- % self.pdfium_test_path)
+ PrintErr(
+ "FAILURE: Can't find test executable '%s'" % self.pdfium_test_path)
PrintErr('Use --build-dir to specify its location.')
return False
if not os.access(self.pdfium_test_path, os.X_OK):
- PrintErr("FAILURE: Test executable '%s' lacks execution permissions"
- % self.pdfium_test_path)
+ PrintErr("FAILURE: Test executable '%s' lacks execution permissions" %
+ self.pdfium_test_path)
return False
return True
@@ -79,10 +78,11 @@
instrument_at_start = 'no' if self.args.interesting_section else 'yes'
output_path = self.args.output_path or '/dev/null'
- valgrind_cmd = (['valgrind', '--tool=callgrind',
- '--instr-atstart=%s' % instrument_at_start,
- '--callgrind-out-file=%s' % output_path]
- + self._BuildTestHarnessCommand())
+ valgrind_cmd = ([
+ 'valgrind', '--tool=callgrind',
+ '--instr-atstart=%s' % instrument_at_start,
+ '--callgrind-out-file=%s' % output_path
+ ] + self._BuildTestHarnessCommand())
output = subprocess.check_output(valgrind_cmd, stderr=subprocess.STDOUT)
# Match the line with the instruction count, eg.
@@ -97,8 +97,8 @@
"""
# --no-big-num: do not add thousands separators
# -einstructions: print only instruction count
- cmd_to_run = (['perf', 'stat', '--no-big-num', '-einstructions']
- + self._BuildTestHarnessCommand())
+ cmd_to_run = (['perf', 'stat', '--no-big-num', '-einstructions'] +
+ self._BuildTestHarnessCommand())
output = subprocess.check_output(cmd_to_run, stderr=subprocess.STDOUT)
# Match the line with the instruction count, eg.
@@ -145,33 +145,42 @@
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('pdf_path',
- help='test case to measure load and rendering time')
- parser.add_argument('--build-dir', default=os.path.join('out', 'Release'),
- help='relative path to the build directory with '
- '%s' % PDFIUM_TEST)
- parser.add_argument('--profiler', default=CALLGRIND_PROFILER,
- help='which profiler to use. Supports callgrind, '
- 'perfstat, and none.')
- parser.add_argument('--interesting-section', action='store_true',
- help='whether to measure just the interesting section or '
- 'the whole test harness. The interesting section is '
- 'pdfium reading a pdf from memory and rendering '
- 'it, which omits loading the time to load the file, '
- 'initialize the library, terminate it, etc. '
- 'Limiting to only the interesting section does not '
- 'work on Release since the delimiters are optimized '
- 'out. Callgrind only.')
- parser.add_argument('--png', action='store_true',
- help='outputs a png image on the same location as the '
- 'pdf file')
- parser.add_argument('--pages',
- help='selects some pages to be rendered. Page numbers '
- 'are 0-based. "--pages A" will render only page A. '
- '"--pages A-B" will render pages A to B '
- '(inclusive).')
- parser.add_argument('--output-path',
- help='where to write the profile data output file')
+ parser.add_argument(
+ 'pdf_path', help='test case to measure load and rendering time')
+ parser.add_argument(
+ '--build-dir',
+ default=os.path.join('out', 'Release'),
+ help='relative path to the build directory with '
+ '%s' % PDFIUM_TEST)
+ parser.add_argument(
+ '--profiler',
+ default=CALLGRIND_PROFILER,
+ help='which profiler to use. Supports callgrind, '
+ 'perfstat, and none.')
+ parser.add_argument(
+ '--interesting-section',
+ action='store_true',
+ help='whether to measure just the interesting section or '
+ 'the whole test harness. The interesting section is '
+ 'pdfium reading a pdf from memory and rendering '
+ 'it, which omits loading the time to load the file, '
+ 'initialize the library, terminate it, etc. '
+ 'Limiting to only the interesting section does not '
+ 'work on Release since the delimiters are optimized '
+ 'out. Callgrind only.')
+ parser.add_argument(
+ '--png',
+ action='store_true',
+ help='outputs a png image on the same location as the '
+ 'pdf file')
+ parser.add_argument(
+ '--pages',
+ help='selects some pages to be rendered. Page numbers '
+ 'are 0-based. "--pages A" will render only page A. '
+ '"--pages A-B" will render pages A to B '
+ '(inclusive).')
+ parser.add_argument(
+ '--output-path', help='where to write the profile data output file')
args = parser.parse_args()
if args.interesting_section and args.profiler != CALLGRIND_PROFILER:
diff --git a/testing/tools/suppressor.py b/testing/tools/suppressor.py
index 2b4b5d3..f3c00f4 100755
--- a/testing/tools/suppressor.py
+++ b/testing/tools/suppressor.py
@@ -7,36 +7,40 @@
import common
+
class Suppressor:
+
def __init__(self, finder, feature_string):
feature_vector = feature_string.strip().split(",")
self.has_v8 = "V8" in feature_vector
self.has_xfa = "XFA" in feature_vector
self.suppression_set = self._LoadSuppressedSet('SUPPRESSIONS', finder)
self.image_suppression_set = self._LoadSuppressedSet(
- 'SUPPRESSIONS_IMAGE_DIFF',
- finder)
+ 'SUPPRESSIONS_IMAGE_DIFF', finder)
def _LoadSuppressedSet(self, suppressions_filename, finder):
v8_option = "v8" if self.has_v8 else "nov8"
xfa_option = "xfa" if self.has_xfa else "noxfa"
with open(os.path.join(finder.TestingDir(), suppressions_filename)) as f:
- return set(self._FilterSuppressions(
- common.os_name(), v8_option, xfa_option, self._ExtractSuppressions(f)))
+ return set(
+ self._FilterSuppressions(common.os_name(), v8_option, xfa_option,
+ self._ExtractSuppressions(f)))
def _ExtractSuppressions(self, f):
- return [y.split(' ') for y in
- [x.split('#')[0].strip() for x in
- f.readlines()] if y]
+ return [
+ y.split(' ') for y in [x.split('#')[0].strip()
+ for x in f.readlines()] if y
+ ]
def _FilterSuppressions(self, os, js, xfa, unfiltered_list):
- return [x[0] for x in unfiltered_list
- if self._MatchSuppression(x, os, js, xfa)]
+ return [
+ x[0] for x in unfiltered_list if self._MatchSuppression(x, os, js, xfa)
+ ]
def _MatchSuppression(self, item, os, js, xfa):
- os_column = item[1].split(",");
- js_column = item[2].split(",");
- xfa_column = item[3].split(",");
+ os_column = item[1].split(",")
+ js_column = item[2].split(",")
+ xfa_column = item[3].split(",")
return (('*' in os_column or os in os_column) and
('*' in js_column or js in js_column) and
('*' in xfa_column or xfa in xfa_column))
diff --git a/testing/tools/test_runner.py b/testing/tools/test_runner.py
index 74a86b1..fd901a0 100644
--- a/testing/tools/test_runner.py
+++ b/testing/tools/test_runner.py
@@ -22,7 +22,10 @@
# timestamp of the first commit to repo, 2014/5/9 17:48:50.
TEST_SEED_TIME = "1399672130"
-class KeyboardInterruptError(Exception): pass
+
+class KeyboardInterruptError(Exception):
+ pass
+
# Nomenclature:
# x_root - "x"
@@ -30,11 +33,12 @@
# x_path - "path/to/a/b/c/x.ext"
# c_dir - "path/to/a/b/c"
+
def TestOneFileParallel(this, test_case):
"""Wrapper to call GenerateAndTest() and redirect output to stdout."""
try:
input_filename, source_dir = test_case
- result = this.GenerateAndTest(input_filename, source_dir);
+ result = this.GenerateAndTest(input_filename, source_dir)
return (result, input_filename, source_dir)
except KeyboardInterrupt:
raise KeyboardInterruptError()
@@ -48,6 +52,7 @@
class TestRunner:
+
def __init__(self, dirname):
# Currently the only used directories are corpus, javascript, and pixel,
# which all correspond directly to the type for the test being run. In the
@@ -101,8 +106,8 @@
self.RegenerateIfNeeded_(input_filename, source_dir)
return False, results
else:
- if (self.enforce_expected_images
- and not self.test_suppressor.IsImageDiffSuppressed(input_filename)):
+ if (self.enforce_expected_images and
+ not self.test_suppressor.IsImageDiffSuppressed(input_filename)):
self.RegenerateIfNeeded_(input_filename, source_dir)
print 'FAILURE: %s; Missing expected images' % input_filename
return False, results
@@ -112,14 +117,14 @@
return True, results
def RegenerateIfNeeded_(self, input_filename, source_dir):
- if (not self.options.regenerate_expected
- or self.test_suppressor.IsResultSuppressed(input_filename)
- or self.test_suppressor.IsImageDiffSuppressed(input_filename)):
+ if (not self.options.regenerate_expected or
+ self.test_suppressor.IsResultSuppressed(input_filename) or
+ self.test_suppressor.IsImageDiffSuppressed(input_filename)):
return
platform_only = (self.options.regenerate_expected == 'platform')
- self.image_differ.Regenerate(input_filename, source_dir,
- self.working_dir, platform_only)
+ self.image_differ.Regenerate(input_filename, source_dir, self.working_dir,
+ platform_only)
def Generate(self, source_dir, input_filename, input_root, pdf_path):
original_path = os.path.join(source_dir, input_filename)
@@ -137,24 +142,29 @@
sys.stdout.flush()
- return common.RunCommand(
- [sys.executable, self.fixup_path, '--output-dir=' + self.working_dir,
- input_path])
+ return common.RunCommand([
+ sys.executable, self.fixup_path, '--output-dir=' + self.working_dir,
+ input_path
+ ])
def TestText(self, input_root, expected_txt_path, pdf_path):
txt_path = os.path.join(self.working_dir, input_root + '.txt')
with open(txt_path, 'w') as outfile:
- cmd_to_run = [self.pdfium_test_path, '--send-events',
- '--time=' + TEST_SEED_TIME, pdf_path]
+ cmd_to_run = [
+ self.pdfium_test_path, '--send-events', '--time=' + TEST_SEED_TIME,
+ pdf_path
+ ]
subprocess.check_call(cmd_to_run, stdout=outfile)
cmd = [sys.executable, self.text_diff_path, expected_txt_path, txt_path]
return common.RunCommand(cmd)
def TestPixel(self, input_root, pdf_path, use_ahem):
- cmd_to_run = [self.pdfium_test_path, '--send-events', '--png', '--md5',
- '--time=' + TEST_SEED_TIME]
+ cmd_to_run = [
+ self.pdfium_test_path, '--send-events', '--png', '--md5',
+ '--time=' + TEST_SEED_TIME
+ ]
if self.oneshot_renderer:
cmd_to_run.append('--render-oneshot')
@@ -180,11 +190,12 @@
matched = self.gold_baseline.MatchLocalResult(test_name, md5_hash)
if matched == gold.GoldBaseline.MISMATCH:
print 'Skia Gold hash mismatch for test case: %s' % test_name
- elif matched == gold.GoldBaseline.NO_BASELINE:
+ elif matched == gold.GoldBaseline.NO_BASELINE:
print 'No Skia Gold baseline found for test case: %s' % test_name
if self.gold_results:
- self.gold_results.AddTestResult(test_name, md5_hash, img_path, matched)
+ self.gold_results.AddTestResult(test_name, md5_hash, img_path,
+ matched)
if self.test_suppressor.IsResultSuppressed(input_filename):
self.result_suppressed_cases.append(input_filename)
@@ -197,45 +208,65 @@
def Run(self):
parser = optparse.OptionParser()
- parser.add_option('--build-dir', default=os.path.join('out', 'Debug'),
- help='relative path from the base source directory')
+ parser.add_option(
+ '--build-dir',
+ default=os.path.join('out', 'Debug'),
+ help='relative path from the base source directory')
- parser.add_option('-j', default=multiprocessing.cpu_count(),
- dest='num_workers', type='int',
- help='run NUM_WORKERS jobs in parallel')
+ parser.add_option(
+ '-j',
+ default=multiprocessing.cpu_count(),
+ dest='num_workers',
+ type='int',
+ help='run NUM_WORKERS jobs in parallel')
- parser.add_option('--gold_properties', default='', dest="gold_properties",
- help='Key value pairs that are written to the top level '
- 'of the JSON file that is ingested by Gold.')
+ parser.add_option(
+ '--gold_properties',
+ default='',
+ dest="gold_properties",
+ help='Key value pairs that are written to the top level '
+ 'of the JSON file that is ingested by Gold.')
- parser.add_option('--gold_key', default='', dest="gold_key",
- help='Key value pairs that are added to the "key" field '
- 'of the JSON file that is ingested by Gold.')
+ parser.add_option(
+ '--gold_key',
+ default='',
+ dest="gold_key",
+ help='Key value pairs that are added to the "key" field '
+ 'of the JSON file that is ingested by Gold.')
- parser.add_option('--gold_output_dir', default='', dest="gold_output_dir",
- help='Path of where to write the JSON output to be '
- 'uploaded to Gold.')
+ parser.add_option(
+ '--gold_output_dir',
+ default='',
+ dest="gold_output_dir",
+ help='Path of where to write the JSON output to be '
+ 'uploaded to Gold.')
- parser.add_option('--gold_ignore_hashes', default='',
- dest="gold_ignore_hashes",
- help='Path to a file with MD5 hashes we wish to ignore.')
+ parser.add_option(
+ '--gold_ignore_hashes',
+ default='',
+ dest="gold_ignore_hashes",
+ help='Path to a file with MD5 hashes we wish to ignore.')
- parser.add_option('--regenerate_expected', default='',
- dest="regenerate_expected",
- help='Regenerates expected images. Valid values are '
- '"all" to regenerate all expected pngs, and '
- '"platform" to regenerate only platform-specific '
- 'expected pngs.')
+ parser.add_option(
+ '--regenerate_expected',
+ default='',
+ dest="regenerate_expected",
+ help='Regenerates expected images. Valid values are '
+ '"all" to regenerate all expected pngs, and '
+ '"platform" to regenerate only platform-specific '
+ 'expected pngs.')
- parser.add_option('--ignore_errors', action="store_true",
- dest="ignore_errors",
- help='Prevents the return value from being non-zero '
- 'when image comparison fails.')
+ parser.add_option(
+ '--ignore_errors',
+ action="store_true",
+ dest="ignore_errors",
+ help='Prevents the return value from being non-zero '
+ 'when image comparison fails.')
self.options, self.args = parser.parse_args()
- if (self.options.regenerate_expected
- and self.options.regenerate_expected not in ['all', 'platform']) :
+ if (self.options.regenerate_expected and
+ self.options.regenerate_expected not in ['all', 'platform']):
print 'FAILURE: --regenerate_expected must be "all" or "platform"'
return 1
@@ -260,8 +291,8 @@
shutil.rmtree(self.working_dir, ignore_errors=True)
os.makedirs(self.working_dir)
- self.feature_string = subprocess.check_output([self.pdfium_test_path,
- '--show-config'])
+ self.feature_string = subprocess.check_output(
+ [self.pdfium_test_path, '--show-config'])
self.test_suppressor = suppressor.Suppressor(finder, self.feature_string)
self.image_differ = pngdiffer.PNGDiffer(finder)
error_message = self.image_differ.CheckMissingTools(
@@ -272,7 +303,7 @@
self.gold_baseline = gold.GoldBaseline(self.options.gold_properties)
- walk_from_dir = finder.TestingDir(test_dir);
+ walk_from_dir = finder.TestingDir(test_dir)
self.test_cases = []
self.execution_suppressed_cases = []
@@ -286,7 +317,7 @@
return 1
self.test_cases.append((os.path.basename(input_path),
- os.path.dirname(input_path)))
+ os.path.dirname(input_path)))
else:
for file_dir, _, filename_list in os.walk(walk_from_dir):
for input_filename in filename_list:
@@ -306,11 +337,10 @@
# Collect Gold results if an output directory was named.
self.gold_results = None
if self.options.gold_output_dir:
- self.gold_results = gold.GoldResults(self.test_type,
- self.options.gold_output_dir,
- self.options.gold_properties,
- self.options.gold_key,
- self.options.gold_ignore_hashes)
+ self.gold_results = gold.GoldResults(
+ self.test_type, self.options.gold_output_dir,
+ self.options.gold_properties, self.options.gold_key,
+ self.options.gold_ignore_hashes)
if self.options.num_workers > 1 and len(self.test_cases) > 1:
try:
diff --git a/testing/tools/text_diff.py b/testing/tools/text_diff.py
index 3a5bd7b..fdf45a0 100755
--- a/testing/tools/text_diff.py
+++ b/testing/tools/text_diff.py
@@ -6,17 +6,18 @@
import difflib
import sys
+
def main(argv):
if len(argv) != 3:
- print '%s: invalid arguments' % argv[0]
- return 2
+ print '%s: invalid arguments' % argv[0]
+ return 2
filename1 = argv[1]
filename2 = argv[2]
try:
with open(filename1, "r") as f1:
- str1 = f1.readlines();
+ str1 = f1.readlines()
with open(filename2, "r") as f2:
- str2 = f2.readlines();
+ str2 = f2.readlines()
diffs = difflib.unified_diff(
str1, str2, fromfile=filename1, tofile=filename2)
except Exception as e:
@@ -28,5 +29,6 @@
status_code = 1
return status_code
+
if __name__ == '__main__':
sys.exit(main(sys.argv))