zephyr/scripts/size_report
Vinayak Kariappa Chettimada f16f88555f scripts: Fix size_report to use correct objdump and nm
Commands for objdump and nm were hardcoded in the size_report
script, which failed on macOS as it tried to use the ones from
Xcode. Fixed the script to pick the right objdump and nm
from the toolchain being used to build the application.

Signed-off-by: Vinayak Kariappa Chettimada <vich@nordicsemi.no>
2017-07-13 14:27:49 -05:00


#!/usr/bin/env python3
#
# Copyright (c) 2016, Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Based on a script by:
# Chereau, Fabien <fabien.chereau@intel.com>
import os
import re
from optparse import OptionParser
import subprocess
import json
import operator
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
parser = OptionParser()
parser.add_option("-d", "--depth", dest="depth", type="int",
help="How deep should we go into the tree", metavar="DEPTH")
parser.add_option("-o", "--outdir", dest="outdir",
help="read files from directory OUT", metavar="OUT")
parser.add_option("-k", "--kernel-name", dest="binary", default="zephyr",
help="kernel binary name")
parser.add_option("-r", "--ram",
action="store_true", dest="ram", default=False,
help="print RAM statistics")
parser.add_option("-F", "--rom",
action="store_true", dest="rom", default=False,
help="print ROM statistics")
(options, args) = parser.parse_args()
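# Example invocation (illustrative):
#   ./scripts/size_report -o build_outdir -k zephyr -F -d 3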
# Return a dict containing symbol_name: path/to/file/where/it/originates
# for all symbols from the .elf file. Optionally strips the path according
# to the passed sub-path
def load_symbols_and_paths(elf_file, path_to_strip = None):
symbols_paths = {}
bin_nm = os.environ.get("NM", "nm")
nm_out = subprocess.check_output([bin_nm, elf_file, "-S", "-l", "--size-sort", "--radix=d"])
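    # Each nm output line (with -S -l --size-sort --radix=d) is expected to look
    # roughly like (illustrative):
    #   00004660 00000104 T some_symbol	/path/to/source/file.c:42
    # i.e. address, size (decimal), type, name, and an optional "file:line" hint.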
for line in nm_out.decode('utf8').split('\n'):
fields = line.replace('\t', ' ').split(' ')
# Get rid of trailing empty field
if len(fields) == 1 and fields[0] == '':
continue
assert len(fields)>=4
if len(fields)<5:
path = ":/" + fields[3]
else:
path = fields[4].split(':')[0]
        if path_to_strip is not None:
if path_to_strip in path:
path = path.replace(path_to_strip, "") + '/' + fields[3]
else:
path = ":/" + fields[3]
symbols_paths[fields[3]] = path
return symbols_paths
def get_section_size(f, section_name):
decimal_size = 0
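    # The .stat file is assumed to contain readelf-style section header lines,
    # e.g. (illustrative):
    #   [ 2] text  PROGBITS  00000000  000000b4  00001a2c  00  AX  0  0  4
    # After collapsing whitespace and dropping the "[ N]" index, field 4 is the
    # section size in hex.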
re_res = re.search(r"(.*] "+section_name+".*)", f, re.MULTILINE)
    if re_res is not None:
        # Collapse runs of whitespace into single spaces, then skip the first
        # characters (the "[ N]" section index) so they don't add an extra field
res = ' '.join(re_res.group(1).split())[5:]
decimal_size = int(res.split()[4], 16)
return decimal_size
def get_footprint_from_bin_and_statfile(bin_file, stat_file, total_flash, total_ram):
"""Compute flash and RAM memory footprint from a .bin and.stat file"""
f = open(stat_file).read()
# Get kctext + text + ctors + rodata + kcrodata segment size
total_used_flash = os.path.getsize(bin_file)
    # Get the RAM used on the target
total_used_ram = (get_section_size(f, "noinit") + get_section_size(f, "bss")
+ get_section_size(f, "initlevel") + get_section_size(f, "datas") + get_section_size(f, ".data")
+ get_section_size(f, ".heap") + get_section_size(f, ".stack") + get_section_size(f, ".bss")
+ get_section_size(f, ".panic_section"))
total_percent_ram = 0
total_percent_flash = 0
if total_ram > 0:
total_percent_ram = float(total_used_ram) / total_ram * 100
if total_flash >0:
total_percent_flash = float(total_used_flash) / total_flash * 100
res = { "total_flash": total_used_flash,
"percent_flash": total_percent_flash,
"total_ram": total_used_ram,
"percent_ram": total_percent_ram}
return res
def generate_target_memory_section(out, kernel_name, source_dir, features_json):
features_path_data = None
try:
features_path_data = json.loads(open(features_json, 'r').read())
except:
pass
bin_file_abs = os.path.join(out, kernel_name+'.bin')
elf_file_abs = os.path.join(out, kernel_name+'.elf')
    # First deal with size on flash. These are the sections flagged as LOAD in the objdump output
bin_objdump = os.environ.get("OBJDUMP", "objdump")
size_out = subprocess.check_output([bin_objdump, "-hw", elf_file_abs])
loaded_section_total = 0
loaded_section_names = []
loaded_section_names_sizes = {}
ram_section_total = 0
ram_section_names = []
ram_section_names_sizes = {}
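    # objdump -hw section lines look roughly like (illustrative):
    #   1 text  00001a2c  00000000  00000000  00000074  2**2  CONTENTS, ALLOC, LOAD, READONLY, CODE
    # i.e. index, name, size (hex), VMA, LMA, file offset, alignment and flags,
    # so split()[1] is the section name and split()[2] its size.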
for line in size_out.decode('utf8').split('\n'):
if "LOAD" in line:
loaded_section_total = loaded_section_total + int(line.split()[2], 16)
loaded_section_names.append(line.split()[1])
loaded_section_names_sizes[line.split()[1]] = int(line.split()[2], 16)
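        # Sections that are ALLOC but neither read-only, rodata nor code are
        # counted as RAM (e.g. bss, noinit, datas).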
if "ALLOC" in line and "READONLY" not in line and "rodata" not in line and "CODE" not in line:
ram_section_total = ram_section_total + int(line.split()[2], 16)
ram_section_names.append(line.split()[1])
ram_section_names_sizes[line.split()[1]] = int(line.split()[2], 16)
    # Actual .bin size, which doesn't always match the sum of the section sizes
bin_size = os.stat(bin_file_abs).st_size
# Get the path associated to each symbol
symbols_paths = load_symbols_and_paths(elf_file_abs, source_dir)
    # A set of helper functions for building a simple tree with a path-like
    # hierarchy.
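    # For example (illustrative): _insert_one_elem(tree, "kernel/sched.c/foo", 24)
    # adds 24 bytes to the "kernel", "kernel/sched.c" and "kernel/sched.c/foo"
    # nodes, and _parent_for_node("kernel/sched.c") returns "kernel".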
def _insert_one_elem(tree, path, size):
splitted_path = path.split('/')
cur = None
for p in splitted_path:
if cur == None:
cur = p
else:
cur = cur + '/' + p
if cur in tree:
tree[cur] += size
else:
tree[cur] = size
def _parent_for_node(e):
parent = "root" if len(e.split('/')) == 1 else e.rsplit('/', 1)[0]
if e == "root":
parent = None
return parent
def _childs_for_node(tree, node):
res = []
for e in tree:
if _parent_for_node(e) == node:
res += [e]
return res
def _siblings_for_node(tree, node):
return _childs_for_node(tree, _parent_for_node(node))
def _max_sibling_size(tree, node):
siblings = _siblings_for_node(tree, node)
return max([tree[e] for e in siblings])
    # Extract the list of symbols a second time, but this time using the objdump
    # tool, which provides more info than nm
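    # A typical objdump -t line looks roughly like (illustrative, 32-bit output):
    #   00004660 l     F text	00000068 some_symbol
    # Below, the 7-character flags field is blanked out so that each entry splits
    # into exactly: address, ".......", section, size (hex) and symbol name.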
bin_objdump = os.environ.get("OBJDUMP", "objdump")
symbols_out = subprocess.check_output([bin_objdump, "-tw", elf_file_abs])
flash_symbols_total = 0
data_nodes = {}
data_nodes['root'] = 0
ram_symbols_total = 0
ram_nodes = {}
ram_nodes['root'] = 0
for l in symbols_out.decode('utf8').split('\n'):
line = l[0:9] + "......." + l[16:]
fields = line.replace('\t', ' ').split(' ')
        # Skip lines that don't parse into exactly the five expected fields
if len(fields) != 5:
continue
size = int(fields[3], 16)
if fields[2] in loaded_section_names and size != 0:
flash_symbols_total += size
_insert_one_elem(data_nodes, symbols_paths[fields[4]], size)
if fields[2] in ram_section_names and size != 0:
ram_symbols_total += size
_insert_one_elem(ram_nodes, symbols_paths[fields[4]], size)
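    # The helpers below categorise symbols against an optional "features" JSON.
    # Each feature entry is expected to provide "folders" (include filters),
    # "excludes" (exclude filters) and "children" (nested features); "size" is
    # (re)initialised by _init_features_list_results. Illustrative entry:
    #   {"folders": ["kernel/"], "excludes": [], "children": []}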
def _init_features_list_results(features_list):
for feature in features_list:
_init_feature_results(feature)
def _init_feature_results(feature):
feature["size"] = 0
        # recurse through the children
for child in feature["children"]:
_init_feature_results(child)
def _check_all_symbols(symbols_struct, features_list):
out = ""
sorted_nodes = sorted(symbols_struct.items(), key=operator.itemgetter(0))
        named_symbol_filter = re.compile(r'.*\.[a-zA-Z]+/.*')
out_symbols_filter = re.compile('^:/')
for symbpath in sorted_nodes:
matched = 0
            # Plain files and folders (paths not matching the regex) are discarded,
            # e.g. "folder" or "folder/file.ext"
            is_symbol = named_symbol_filter.match(symbpath[0])
            is_generated = out_symbols_filter.match(symbpath[0])
            if is_symbol is None and is_generated is None:
continue
# The symbols inside a file are kept: folder/file.ext/symbol
# and unrecognized paths too (":/")
for feature in features_list:
matched = matched + _does_symbol_matches_feature(symbpath[0], symbpath[1], feature)
            if matched == 0:
out += "UNCATEGORIZED: %s %d<br/>" % (symbpath[0], symbpath[1])
return out
def _does_symbol_matches_feature(symbol, size, feature):
matched = 0
# check each include-filter in feature
for inc_path in feature["folders"]:
# filter out if the include-filter is not in the symbol string
if inc_path not in symbol:
continue
            # if the symbol matches the include-filter, check it against the exclude-filters
is_excluded = 0
for exc_path in feature["excludes"]:
if exc_path in symbol:
is_excluded = 1
break
if is_excluded == 0:
matched = 1
feature["size"] = feature["size"] + size
# it can only be matched once per feature (add size once)
break
# check children independently of this feature's result
for child in feature["children"]:
child_matched = _does_symbol_matches_feature(symbol, size, child)
matched = matched + child_matched
return matched
# Create a simplified tree keeping only the most important contributors
# This is used for the pie diagram summary
min_parent_size = bin_size/25
min_sibling_size = bin_size/35
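    # In other words: keep a node only if its parent accounts for at least ~4%
    # of the binary and its largest sibling for roughly ~3%; smaller leftovers
    # are folded into an "(other)" bucket further below.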
tmp = {}
for e in data_nodes:
if _parent_for_node(e) == None:
continue
if data_nodes[_parent_for_node(e)] < min_parent_size:
continue
if _max_sibling_size(data_nodes, e) < min_sibling_size:
continue
tmp[e] = data_nodes[e]
# Keep only final nodes
tmp2 = {}
for e in tmp:
if len(_childs_for_node(tmp, e)) == 0:
tmp2[e] = tmp[e]
# Group nodes too small in an "other" section
filtered_data_nodes = {}
for e in tmp2:
if tmp[e] < min_sibling_size:
k = _parent_for_node(e) + "/(other)"
if k in filtered_data_nodes:
filtered_data_nodes[k] += tmp[e]
else:
filtered_data_nodes[k] = tmp[e]
else:
filtered_data_nodes[e] = tmp[e]
def _parent_level_3_at_most(node):
e = _parent_for_node(node)
while e.count('/')>2:
e = _parent_for_node(e)
return e
return ram_nodes, data_nodes
def print_tree(data, total, depth):
base = os.environ['ZEPHYR_BASE']
totp = 0
print('{:92s} {:10s} {:8s}'.format(bcolors.FAIL + "Path", "Size", "%" + bcolors.ENDC))
print("'='*110i")
for i in sorted(data):
p = i.split("/")
if depth and len(p) > depth:
continue
percent = 100 * float(data[i])/float(total)
percent_c = percent
if len(p) < 2:
totp += percent
if len(p) > 1:
if not os.path.exists(os.path.join(base, i)):
s = bcolors.WARNING + p[-1] + bcolors.ENDC
else:
s = bcolors.OKBLUE + p[-1] + bcolors.ENDC
print('{:80s} {:20d} {:8.2f}%'.format(" "*(len(p)-1) + s, data[i], percent_c ))
else:
print('{:80s} {:20d} {:8.2f}%'.format(bcolors.OKBLUE + i + bcolors.ENDC, data[i], percent_c ))
print('='*110)
print('{:92d}'.format(total))
return totp
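# Entry point: locate <outdir>/<kernel>.elf, .bin and .stat, compute the
# footprint and print a RAM and/or ROM tree depending on the -r / -F options.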
binary = os.path.join(options.outdir, options.binary + ".elf")
if options.outdir and os.path.exists(binary):
fp = get_footprint_from_bin_and_statfile("%s/%s.bin" %(options.outdir, options.binary),
"%s/%s.stat" %(options.outdir,options.binary), 0, 0 )
base = os.environ['ZEPHYR_BASE']
ram, data = generate_target_memory_section(options.outdir, options.binary, base + '/', None)
if options.rom:
print_tree(data, fp['total_flash'], options.depth)
if options.ram:
print_tree(ram, fp['total_ram'], options.depth)
else:
print("%s does not exist." %(binary))