scripts: move footprint scripts to footprint/
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent 3461c8cf59
commit 38625607b1
3 changed files with 2 additions and 2 deletions
298
scripts/footprint/compare_footprint
Executable file
@@ -0,0 +1,298 @@
#!/usr/bin/env python3

"""
This script helps you compare footprint results with previous commits in git.
If you don't have a git repository, it will compare your current tree
against the last release results.
To run it you need to set up the same environment as sanitycheck.

The script takes two optional args, COMMIT and BASE_COMMIT, which tell it
which commit to use as the current commit and as the base for comparison,
respectively. Any commit SHA recognized by git is accepted.

COMMIT is the commit to compare against BASE_COMMIT.
Default:
    the current working directory if we have changes in the git tree or we
    don't have git.
    HEAD in any other case.

BASE_COMMIT is the commit used as the base to compare results against.
Default:
    sanity_last_release.csv if we don't have a git tree.
    HEAD if we have changes in the working tree.
    HEAD~1 if we don't have changes and we use the default COMMIT.
    COMMIT~1 if we have a valid COMMIT.
"""

import argparse
import os
import csv
import subprocess
import logging
import tempfile
import shutil

if "ZEPHYR_BASE" not in os.environ:
    logging.error("$ZEPHYR_BASE environment variable undefined.\n")
    exit(1)

logger = None
GIT_ENABLED = False
RELEASE_DATA = 'sanity_last_release.csv'

def is_git_enabled():
    global GIT_ENABLED
    proc = subprocess.Popen('git rev-parse --is-inside-work-tree',
                            stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    if proc.wait() != 0:
        # Not inside a git work tree (or git unavailable): leave git disabled.
        GIT_ENABLED = False
        return
    GIT_ENABLED = True

def init_logs():
    global logger
    log_lev = os.environ.get('LOG_LEVEL', None)
    level = logging.INFO
    if log_lev == "DEBUG":
        level = logging.DEBUG
    elif log_lev == "ERROR":
        level = logging.ERROR

    console = logging.StreamHandler()
    formatter = logging.Formatter('%(levelname)-8s: %(message)s')
    console.setFormatter(formatter)
    logger = logging.getLogger('')
    logger.addHandler(console)
    logger.setLevel(level)

    logging.debug("Log init completed")

def parse_args():
    parser = argparse.ArgumentParser(
        description="Compare footprint apps RAM and ROM sizes. Note: "
        "To run it you need to set up the same environment as sanitycheck.")
    parser.add_argument('-b', '--base-commit', default=None,
                        help="Commit ID to use as base for the footprint "
                        "compare. Default is the parent of the current "
                        "commit, or sanity_last_release.csv if we don't have git.")
    parser.add_argument('-c', '--commit', default=None,
                        help="Commit ID whose footprint to compare against "
                        "the base. Default is HEAD or the working tree.")
    return parser.parse_args()

def get_git_commit(commit):
    commit_id = None
    proc = subprocess.Popen('git rev-parse %s' % commit, stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    if proc.wait() == 0:
        commit_id = proc.stdout.read().decode("utf-8").strip()
    return commit_id

def sanity_results_filename(commit=None, cwd=os.environ.get('ZEPHYR_BASE')):
    if not commit:
        file_name = "tmp.csv"
    else:
        if commit == RELEASE_DATA:
            file_name = RELEASE_DATA
        else:
            file_name = "%s.csv" % commit

    return os.path.join(cwd, 'scripts', 'sanity_chk', file_name)

def git_checkout(commit, cwd=os.environ.get('ZEPHYR_BASE')):
    proc = subprocess.Popen('git diff --quiet', stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, cwd=cwd, shell=True)
    if proc.wait() != 0:
        raise Exception("Cannot continue, you have unstaged changes in your working tree")

    proc = subprocess.Popen('git reset %s --hard' % commit,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            cwd=cwd, shell=True)
    if proc.wait() == 0:
        return True
    else:
        logger.error(proc.stdout.read())
        return False

def run_sanity_footprint(commit=None, cwd=os.environ.get('ZEPHYR_BASE'),
                         output_file=None):
    if not output_file:
        output_file = sanity_results_filename(commit)
    cmd = '/bin/bash -c "source ./zephyr-env.sh && sanitycheck'
    cmd += ' +scripts/sanity_chk/sanity_compare.args -o %s"' % output_file
    logger.debug('Sanity (%s) %s' % (commit, cmd))

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            cwd=cwd, shell=True)
    output, error = proc.communicate()
    if proc.wait() == 0:
        logger.debug(output)
        return True

    logger.error("Couldn't build footprint apps in commit %s" % commit)
    logger.error(output)
    raise Exception("Couldn't build footprint apps in commit %s" % commit)

def run_footprint_build(commit=None):
    logging.debug("footprint build for %s" % commit)
    if not commit:
        run_sanity_footprint()
    else:
        cmd = "git clone --no-hardlinks %s" % os.environ.get('ZEPHYR_BASE')
        tmp_location = os.path.join(tempfile.gettempdir(),
                                    os.path.basename(os.environ.get('ZEPHYR_BASE')))
        if os.path.exists(tmp_location):
            shutil.rmtree(tmp_location)
        logging.debug("cloning into %s" % tmp_location)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                cwd=tempfile.gettempdir(), shell=True)
        if proc.wait() == 0:
            if git_checkout(commit, tmp_location):
                run_sanity_footprint(commit, tmp_location)
        else:
            logger.error(proc.stdout.read())
        shutil.rmtree(tmp_location, ignore_errors=True)

    return True

def read_sanity_report(filename):
    data = []
    with open(filename) as fp:
        tmp = csv.DictReader(fp)
        for row in tmp:
            data.append(row)
    return data

def get_footprint_results(commit=None):
    results = {}

    sanity_file = sanity_results_filename(commit)
    if (not os.path.exists(sanity_file) or not commit) and commit != RELEASE_DATA:
        run_footprint_build(commit)

    return read_sanity_report(sanity_file)

def tree_changes():
    proc = subprocess.Popen('git diff --quiet', stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    if proc.wait() != 0:
        return True
    return False

def get_default_current_commit():
    if tree_changes():
        return None
    else:
        return get_git_commit('HEAD')

def get_default_base_commit(current_commit):
    if not current_commit:
        if tree_changes():
            return get_git_commit('HEAD')
        else:
            return get_git_commit('HEAD~1')
    else:
        return get_git_commit('%s~1' % current_commit)

def build_history(b_commit=None, c_commit=None):
    if not GIT_ENABLED:
        logger.info('Working on current tree, git is not enabled.')
        current_commit = None
        base_commit = RELEASE_DATA
    else:
        if not c_commit:
            current_commit = get_default_current_commit()
        else:
            current_commit = get_git_commit(c_commit)

        if not b_commit:
            base_commit = get_default_base_commit(current_commit)
        else:
            base_commit = get_git_commit(b_commit)

    if not base_commit:
        logger.error("Cannot resolve base commit")
        return

    logger.info("Base: %s" % base_commit)
    logger.info("Current: %s" % (current_commit if current_commit else
                                 'working space'))

    current_results = get_footprint_results(current_commit)
    base_results = get_footprint_results(base_commit)
    deltas = compare_results(base_results, current_results)
    print_deltas(deltas)

def compare_results(base_results, current_results):
    interesting_metrics = [("ram_size", int),
                           ("rom_size", int)]
    results = {}
    metrics = {}

    for type, data in {'base': base_results, 'current': current_results}.items():
        metrics[type] = {}
        for row in data:
            d = {}
            for m, mtype in interesting_metrics:
                if row[m]:
                    d[m] = mtype(row[m])
            if row["test"] not in metrics[type]:
                metrics[type][row["test"]] = {}
            metrics[type][row["test"]][row["platform"]] = d

    for test, platforms in metrics['current'].items():
        if test not in metrics['base']:
            continue
        tests = {}

        for platform, test_data in platforms.items():
            if platform not in metrics['base'][test]:
                continue
            golden_metric = metrics['base'][test][platform]
            tmp = {}
            for metric, _ in interesting_metrics:
                if metric not in golden_metric or metric not in test_data:
                    continue
                if test_data[metric] == "":
                    continue
                delta = test_data[metric] - golden_metric[metric]
                if delta == 0:
                    continue
                tmp[metric] = {
                    'delta': delta,
                    'current': test_data[metric],
                }

            if len(tmp) != 0:
                tests[platform] = tmp
        if len(tests) != 0:
            results[test] = tests

    return results

def print_deltas(deltas):
    error_count = 0
    for test in sorted(deltas):
        print("\n{:<25}".format(test))
        for platform, data in deltas[test].items():
            print("  {:<25}".format(platform))
            for metric, value in data.items():
                # Delta relative to the base size (current - delta == base).
                percentage = (float(value['delta']) / float(value['current'] -
                                                            value['delta']))
                print("    {} ({:+.2%}) {:+6} current size {:>7} bytes".format(
                    "RAM" if metric == "ram_size" else "ROM", percentage,
                    value['delta'], value['current']))
                error_count = error_count + 1
    if error_count == 0:
        print("There are no changes in RAM nor in ROM of the footprint apps.")
    return error_count

def main():
    args = parse_args()
    build_history(args.base_commit, args.commit)

if __name__ == "__main__":
    init_logs()
    is_git_enabled()
    main()
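
For reference, the output of print_deltas looks roughly like this (the test
name, platform, and sizes below are made-up placeholders):

    footprint.min
      qemu_x86
        RAM (+1.23%)    +52 current size    4280 bytes
        ROM (-0.40%)   -128 current size   32100 bytes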
344
scripts/footprint/size_report
Executable file
@@ -0,0 +1,344 @@
#!/usr/bin/env python3
#
# Copyright (c) 2016, Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Based on a script by:
# Chereau, Fabien <fabien.chereau@intel.com>

import os
import re
from optparse import OptionParser
import subprocess
import json
import operator

class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


parser = OptionParser()
parser.add_option("-d", "--depth", dest="depth", type="int",
                  help="How deep should we go into the tree", metavar="DEPTH")
parser.add_option("-o", "--outdir", dest="outdir",
                  help="read files from directory OUT", metavar="OUT")
parser.add_option("-k", "--kernel-name", dest="binary", default="zephyr",
                  help="kernel binary name")
parser.add_option("-r", "--ram",
                  action="store_true", dest="ram", default=False,
                  help="print RAM statistics")
parser.add_option("-F", "--rom",
                  action="store_true", dest="rom", default=False,
                  help="print ROM statistics")

(options, args) = parser.parse_args()

# Return a dict containing symbol_name: path/to/file/where/it/originates
# for all symbols from the .elf file. Optionally strips the path according
# to the passed sub-path.
def load_symbols_and_paths(elf_file, path_to_strip=None):
    symbols_paths = {}
    bin_nm = os.environ.get("NM", "nm")
    nm_out = subprocess.check_output([bin_nm, elf_file, "-S", "-l", "--size-sort", "--radix=d"])
    for line in nm_out.decode('utf8').split('\n'):
        fields = line.replace('\t', ' ').split(' ')
        # Get rid of trailing empty field
        if len(fields) == 1 and fields[0] == '':
            continue
        assert len(fields) >= 4
        if len(fields) < 5:
            path = ":/" + fields[3]
        else:
            path = fields[4].split(':')[0]
        if path_to_strip is not None:
            if path_to_strip in path:
                path = path.replace(path_to_strip, "") + '/' + fields[3]
            else:
                path = ":/" + fields[3]
        symbols_paths[fields[3]] = path
    return symbols_paths
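
# Illustrative nm line this parses (decimal radix; the values and path are
# placeholders): "134518112 00000052 T main\t/home/user/zephyr/src/main.c:10"
# After tab-to-space replacement and the split, fields[3] is the symbol name
# and fields[4] the "file:line" it comes from.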

def get_section_size(f, section_name):
    decimal_size = 0
    re_res = re.search(r"(.*] "+section_name+".*)", f, re.MULTILINE)
    if re_res is not None:
        # Replace multiple spaces with one space
        # Skip first characters to avoid having 1 extra random space
        res = ' '.join(re_res.group(1).split())[5:]
        # The size field in the section table is hexadecimal.
        decimal_size = int(res.split()[4], 16)
    return decimal_size
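
# Illustrative .stat line the regex matches (readelf -S style; values are
# placeholders):
#   [ 2] .text             PROGBITS        00100000 000074 0001a0 00  AX  0  0 4
# After whitespace collapsing and skipping the "[ 2] " prefix, split()[4] is
# the hex size field ("0001a0" above).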

def get_footprint_from_bin_and_statfile(bin_file, stat_file, total_flash, total_ram):
    """Compute flash and RAM memory footprint from a .bin and .stat file"""
    f = open(stat_file).read()

    # Get kctext + text + ctors + rodata + kcrodata segment size
    total_used_flash = os.path.getsize(bin_file)

    # Compute the RAM used on the target
    total_used_ram = (get_section_size(f, "noinit") + get_section_size(f, "bss")
                      + get_section_size(f, "initlevel") + get_section_size(f, "datas") + get_section_size(f, ".data")
                      + get_section_size(f, ".heap") + get_section_size(f, ".stack") + get_section_size(f, ".bss")
                      + get_section_size(f, ".panic_section"))

    total_percent_ram = 0
    total_percent_flash = 0
    if total_ram > 0:
        total_percent_ram = float(total_used_ram) / total_ram * 100
    if total_flash > 0:
        total_percent_flash = float(total_used_flash) / total_flash * 100

    res = {"total_flash": total_used_flash,
           "percent_flash": total_percent_flash,
           "total_ram": total_used_ram,
           "percent_ram": total_percent_ram}
    return res

def generate_target_memory_section(out, kernel_name, source_dir, features_json):
    features_path_data = None
    try:
        features_path_data = json.loads(open(features_json, 'r').read())
    except Exception:
        # The features file is optional; fall back to no feature data.
        pass

    bin_file_abs = os.path.join(out, kernel_name + '.bin')
    elf_file_abs = os.path.join(out, kernel_name + '.elf')

    # First deal with size on flash. These are the sections flagged as LOAD
    # in the objdump output.
    bin_objdump = os.environ.get("OBJDUMP", "objdump")
    size_out = subprocess.check_output([bin_objdump, "-hw", elf_file_abs])
    loaded_section_total = 0
    loaded_section_names = []
    loaded_section_names_sizes = {}
    ram_section_total = 0
    ram_section_names = []
    ram_section_names_sizes = {}
    for line in size_out.decode('utf8').split('\n'):
        if "LOAD" in line:
            loaded_section_total = loaded_section_total + int(line.split()[2], 16)
            loaded_section_names.append(line.split()[1])
            loaded_section_names_sizes[line.split()[1]] = int(line.split()[2], 16)
        if "ALLOC" in line and "READONLY" not in line and "rodata" not in line and "CODE" not in line:
            ram_section_total = ram_section_total + int(line.split()[2], 16)
            ram_section_names.append(line.split()[1])
            ram_section_names_sizes[line.split()[1]] = int(line.split()[2], 16)
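
    # Illustrative "objdump -hw" section line classified above (values are
    # placeholders):
    #   1 .text  0001a2c4  00100000  00100000  00000094  2**2  CONTENTS, ALLOC, LOAD, READONLY, CODE
    # line.split()[1] is the section name and line.split()[2] its hex size.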

    # Actual .bin size, which doesn't always match the sum of section sizes
    bin_size = os.stat(bin_file_abs).st_size

    # Get the path associated to each symbol
    symbols_paths = load_symbols_and_paths(elf_file_abs, source_dir)

    # A set of helper functions for building a simple tree with a path-like
    # hierarchy.
    def _insert_one_elem(tree, path, size):
        splitted_path = path.split('/')
        cur = None
        for p in splitted_path:
            if cur is None:
                cur = p
            else:
                cur = cur + '/' + p
            if cur in tree:
                tree[cur] += size
            else:
                tree[cur] = size

    def _parent_for_node(e):
        parent = "root" if len(e.split('/')) == 1 else e.rsplit('/', 1)[0]
        if e == "root":
            parent = None
        return parent

    def _childs_for_node(tree, node):
        res = []
        for e in tree:
            if _parent_for_node(e) == node:
                res += [e]
        return res

    def _siblings_for_node(tree, node):
        return _childs_for_node(tree, _parent_for_node(node))

    def _max_sibling_size(tree, node):
        siblings = _siblings_for_node(tree, node)
        return max([tree[e] for e in siblings])

    # Extract the list of symbols a second time, but this time using objdump,
    # which provides more info than nm.
    bin_objdump = os.environ.get("OBJDUMP", "objdump")
    symbols_out = subprocess.check_output([bin_objdump, "-tw", elf_file_abs])
    flash_symbols_total = 0
    data_nodes = {}
    data_nodes['root'] = 0

    ram_symbols_total = 0
    ram_nodes = {}
    ram_nodes['root'] = 0
    for l in symbols_out.decode('utf8').split('\n'):
        # Mask out the flag columns so the split below yields exactly
        # [address, flags, section, size, name].
        line = l[0:9] + "......." + l[16:]
        fields = line.replace('\t', ' ').split(' ')
        # Skip lines that don't split into the five expected fields
        if len(fields) != 5:
            continue
        size = int(fields[3], 16)
        if fields[2] in loaded_section_names and size != 0:
            flash_symbols_total += size
            _insert_one_elem(data_nodes, symbols_paths[fields[4]], size)
        if fields[2] in ram_section_names and size != 0:
            ram_symbols_total += size
            _insert_one_elem(ram_nodes, symbols_paths[fields[4]], size)

    def _init_features_list_results(features_list):
        for feature in features_list:
            _init_feature_results(feature)

    def _init_feature_results(feature):
        feature["size"] = 0
        # recurse through children
        for child in feature["children"]:
            _init_feature_results(child)

    def _check_all_symbols(symbols_struct, features_list):
        out = ""
        sorted_nodes = sorted(symbols_struct.items(), key=operator.itemgetter(0))
        named_symbol_filter = re.compile(r'.*\.[a-zA-Z]+/.*')
        out_symbols_filter = re.compile('^:/')
        for symbpath in sorted_nodes:
            matched = 0
            # The files and folders (not matching the regex) are discarded,
            # e.g. "folder" or "folder/file.ext"
            is_symbol = named_symbol_filter.match(symbpath[0])
            is_generated = out_symbols_filter.match(symbpath[0])
            if is_symbol is None and is_generated is None:
                continue
            # The symbols inside a file are kept: folder/file.ext/symbol
            # and unrecognized paths too (":/")
            for feature in features_list:
                matched = matched + _does_symbol_matches_feature(symbpath[0], symbpath[1], feature)
            if matched == 0:
                out += "UNCATEGORIZED: %s %d<br/>" % (symbpath[0], symbpath[1])
        return out

    def _does_symbol_matches_feature(symbol, size, feature):
        matched = 0
        # check each include-filter in the feature
        for inc_path in feature["folders"]:
            # filter out if the include-filter is not in the symbol string
            if inc_path not in symbol:
                continue
            # if the symbol matches the include-filter, check it against the
            # exclude-filters
            is_excluded = 0
            for exc_path in feature["excludes"]:
                if exc_path in symbol:
                    is_excluded = 1
                    break
            if is_excluded == 0:
                matched = 1
                feature["size"] = feature["size"] + size
                # it can only be matched once per feature (add size once)
                break
        # check children independently of this feature's result
        for child in feature["children"]:
            child_matched = _does_symbol_matches_feature(symbol, size, child)
            matched = matched + child_matched
        return matched


    # Create a simplified tree keeping only the most important contributors
    # This is used for the pie diagram summary
    min_parent_size = bin_size / 25
    min_sibling_size = bin_size / 35
    tmp = {}
    for e in data_nodes:
        if _parent_for_node(e) is None:
            continue
        if data_nodes[_parent_for_node(e)] < min_parent_size:
            continue
        if _max_sibling_size(data_nodes, e) < min_sibling_size:
            continue
        tmp[e] = data_nodes[e]

    # Keep only final nodes
    tmp2 = {}
    for e in tmp:
        if len(_childs_for_node(tmp, e)) == 0:
            tmp2[e] = tmp[e]

    # Group nodes too small in an "other" section
    filtered_data_nodes = {}
    for e in tmp2:
        if tmp[e] < min_sibling_size:
            k = _parent_for_node(e) + "/(other)"
            if k in filtered_data_nodes:
                filtered_data_nodes[k] += tmp[e]
            else:
                filtered_data_nodes[k] = tmp[e]
        else:
            filtered_data_nodes[e] = tmp[e]

    def _parent_level_3_at_most(node):
        e = _parent_for_node(node)
        while e.count('/') > 2:
            e = _parent_for_node(e)
        return e

    return ram_nodes, data_nodes


def print_tree(data, total, depth):
    base = os.environ['ZEPHYR_BASE']
    totp = 0
    print('{:92s} {:10s} {:8s}'.format(bcolors.FAIL + "Path", "Size", "%" + bcolors.ENDC))
    print('=' * 110)
    for i in sorted(data):
        p = i.split("/")
        if depth and len(p) > depth:
            continue

        percent = 100 * float(data[i]) / float(total)
        percent_c = percent
        if len(p) < 2:
            totp += percent

        if len(p) > 1:
            if not os.path.exists(os.path.join(base, i)):
                s = bcolors.WARNING + p[-1] + bcolors.ENDC
            else:
                s = bcolors.OKBLUE + p[-1] + bcolors.ENDC
            print('{:80s} {:20d} {:8.2f}%'.format(" " * (len(p) - 1) + s, data[i], percent_c))
        else:
            print('{:80s} {:20d} {:8.2f}%'.format(bcolors.OKBLUE + i + bcolors.ENDC, data[i], percent_c))

    print('=' * 110)
    print('{:92d}'.format(total))
    return totp


# Guard against a missing -o/--outdir so os.path.join doesn't raise TypeError.
binary = os.path.join(options.outdir or "", options.binary + ".elf")

if options.outdir and os.path.exists(binary):
    fp = get_footprint_from_bin_and_statfile("%s/%s.bin" % (options.outdir, options.binary),
                                             "%s/%s.stat" % (options.outdir, options.binary), 0, 0)
    base = os.environ['ZEPHYR_BASE']
    ram, data = generate_target_memory_section(options.outdir, options.binary, base + '/', None)
    if options.rom:
        print_tree(data, fp['total_flash'], options.depth)
    if options.ram:
        print_tree(ram, fp['total_ram'], options.depth)
else:
    print("%s does not exist." % binary)
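
# Illustrative usage (paths are placeholders; assumes ZEPHYR_BASE is set and
# the build produced zephyr.bin/zephyr.elf/zephyr.stat in the output dir):
#   ./size_report -o outdir/qemu_x86/samples/hello_world -k zephyr -r -d 3
#   ./size_report -o outdir/qemu_x86/samples/hello_world -k zephyr -F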