compare_footprint: Python 3 compatibility
The script will now run under either Python 2 or Python 3.

Change-Id: Ia7e6647b2331ff9edfbdec7b7357439f7095a3bc
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
114e5ba186
commit
64f7f3da8d
1 changed file with 11 additions and 11 deletions
|
@@ -85,7 +85,7 @@ def get_git_commit(commit):
|
||||||
proc = subprocess.Popen('git rev-parse %s' % commit, stdout=subprocess.PIPE,
|
proc = subprocess.Popen('git rev-parse %s' % commit, stdout=subprocess.PIPE,
|
||||||
cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
|
cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
|
||||||
if proc.wait() == 0:
|
if proc.wait() == 0:
|
||||||
commit_id = proc.stdout.read().strip()
|
commit_id = proc.stdout.read().decode("utf-8").strip()
|
||||||
return commit_id
|
return commit_id
|
||||||
|
|
||||||
def sanity_results_filename(commit=None, cwd=os.environ.get('ZEPHYR_BASE')):
|
def sanity_results_filename(commit=None, cwd=os.environ.get('ZEPHYR_BASE')):
|
||||||
|
@@ -230,7 +230,7 @@ def compare_results(base_results, current_results):
|
||||||
results = {}
|
results = {}
|
||||||
metrics = {}
|
metrics = {}
|
||||||
|
|
||||||
for type, data in {'base': base_results, 'current': current_results}.iteritems():
|
for type, data in {'base': base_results, 'current': current_results}.items():
|
||||||
metrics[type] = {}
|
metrics[type] = {}
|
||||||
for row in data:
|
for row in data:
|
||||||
d = {}
|
d = {}
|
||||||
|
@@ -241,12 +241,12 @@ def compare_results(base_results, current_results):
|
||||||
metrics[type][row["test"]] = {}
|
metrics[type][row["test"]] = {}
|
||||||
metrics[type][row["test"]][row["platform"]] = d
|
metrics[type][row["test"]][row["platform"]] = d
|
||||||
|
|
||||||
for test, platforms in metrics['current'].iteritems():
|
for test, platforms in metrics['current'].items():
|
||||||
if not test in metrics['base']:
|
if not test in metrics['base']:
|
||||||
continue
|
continue
|
||||||
tests = {}
|
tests = {}
|
||||||
|
|
||||||
for platform, test_data in platforms.iteritems():
|
for platform, test_data in platforms.items():
|
||||||
if not platform in metrics['base'][test]:
|
if not platform in metrics['base'][test]:
|
||||||
continue
|
continue
|
||||||
golden_metric = metrics['base'][test][platform]
|
golden_metric = metrics['base'][test][platform]
|
||||||
|
@@ -274,18 +274,18 @@ def compare_results(base_results, current_results):
|
||||||
def print_deltas(deltas):
|
def print_deltas(deltas):
|
||||||
error_count = 0
|
error_count = 0
|
||||||
for test in sorted(deltas):
|
for test in sorted(deltas):
|
||||||
print "\n{:<25}".format(test)
|
print("\n{:<25}".format(test))
|
||||||
for platform, data in deltas[test].iteritems():
|
for platform, data in deltas[test].items():
|
||||||
print " {:<25}".format(platform)
|
print(" {:<25}".format(platform))
|
||||||
for metric, value in data.iteritems():
|
for metric, value in data.items():
|
||||||
percentage = (float(value['delta']) / float(value['current'] -
|
percentage = (float(value['delta']) / float(value['current'] -
|
||||||
value['delta']))
|
value['delta']))
|
||||||
print " {} ({:+.2%}) {:+6} current size {:>7} bytes".format(
|
print(" {} ({:+.2%}) {:+6} current size {:>7} bytes".format(
|
||||||
"RAM" if metric == "ram_size" else "ROM", percentage,
|
"RAM" if metric == "ram_size" else "ROM", percentage,
|
||||||
value['delta'], value['current'])
|
value['delta'], value['current']))
|
||||||
error_count = error_count + 1
|
error_count = error_count + 1
|
||||||
if error_count == 0:
|
if error_count == 0:
|
||||||
print "There are no changes in RAM neither in ROM of footprint apps."
|
print("There are no changes in RAM neither in ROM of footprint apps.")
|
||||||
return error_count
|
return error_count
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue