diff --git a/Kbuild b/Kbuild
index 787fe228326..89b70088201 100644
--- a/Kbuild
+++ b/Kbuild
@@ -73,10 +73,12 @@ misc/generated/configs.c: include/config/auto.conf FORCE
 	$(call filechk,configs.c)
 
 targets := misc/generated/configs.c
+targets += include/generated/generated_dts_board.h
 targets += include/generated/offsets.h
 
 always := misc/generated/configs.c
+always += include/generated/generated_dts_board.h
 always += include/generated/offsets.h
 
 ifeq ($(CONFIG_MDEF),y)
@@ -90,7 +92,8 @@ endef
 
 cmd_cc_o_c_1 = $(CC) $(KBUILD_CFLAGS) $(ZEPHYRINCLUDE) -c -o $@ $<
 
-arch/$(ARCH)/core/offsets/offsets.o: arch/$(ARCH)/core/offsets/offsets.c $(KCONFIG_CONFIG)
+arch/$(ARCH)/core/offsets/offsets.o: arch/$(ARCH)/core/offsets/offsets.c $(KCONFIG_CONFIG) \
+		include/generated/generated_dts_board.h
 	$(Q)mkdir -p $(dir $@)
 	$(call if_changed,cc_o_c_1)
@@ -109,6 +112,31 @@ define offsetchk
 endef
 
 include/generated/offsets.h: arch/$(ARCH)/core/offsets/offsets.o \
-		include/config/auto.conf FORCE
+	include/config/auto.conf FORCE
 	$(call offsetchk,arch/$(ARCH)/core/offsets/offsets.o)
 
+ifeq ($(CONFIG_HAS_DTS),y)
+define filechk_generated_dts_board.h
+	(echo "/* WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY! */"; \
+	extract_dts_includes.py dts/$(ARCH)/$(BOARD_NAME).dts_compiled $(ZEPHYR_BASE)/dts/$(ARCH)/yaml; \
+	if test -e $(ZEPHYR_BASE)/dts/$(ARCH)/$(BOARD_NAME).fixup; then \
+		echo; echo; \
+		echo "/* Following definitions fixup the generated include */"; \
+		echo; \
+		cat $(ZEPHYR_BASE)/dts/$(ARCH)/$(BOARD_NAME).fixup; \
+	fi; \
+	)
+endef
+else
+define filechk_generated_dts_board.h
+	(echo "/* WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY! */";)
+endef
+endif
+
+
+include/generated/generated_dts_board.h: include/config/auto.conf FORCE
+ifeq ($(CONFIG_HAS_DTS),y)
+	$(Q)$(MAKE) $(build)=dts/$(ARCH)
+endif
+	$(call filechk,generated_dts_board.h)
+
diff --git a/Makefile b/Makefile
index 6307e9662d3..e67a1e1c4aa 100644
--- a/Makefile
+++ b/Makefile
@@ -1019,12 +1019,14 @@ depend dep:
 
 # Directories & files removed with 'make clean'
 CLEAN_DIRS += $(MODVERDIR)
+CLEAN_DIRS += dts/
 
 CLEAN_FILES += misc/generated/sysgen/kernel_main.c \
 	misc/generated/sysgen/sysgen.h \
 	misc/generated/sysgen/prj.mdef \
 	misc/generated/sysgen/micro_private_types.h \
 	misc/generated/sysgen/kernel_main.h \
+	include/generated/generated_dts_board.h \
 	.old_version .tmp_System.map .tmp_version \
 	.tmp_* System.map *.lnk *.map *.elf *.lst \
 	*.bin *.hex *.stat *.strip staticIdt.o linker.cmd
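The Kbuild rule above leans on Kbuild's filechk helper: the header is regenerated on every build, but the file on disk is only replaced when its content actually changes, so objects that depend on generated_dts_board.h are not rebuilt needlessly. A minimal Python sketch of that compare-before-replace behaviour, with hypothetical file names (illustrative only, not part of the patch):

    import os

    def filechk(target, generate):
        # Rewrite 'target' only when the generated content differs,
        # preserving the old timestamp otherwise so make does not
        # rebuild everything that depends on it.
        new_content = generate()
        if os.path.isfile(target):
            with open(target) as f:
                if f.read() == new_content:
                    return False    # unchanged: keep old file and mtime
        with open(target, 'w') as f:
            f.write(new_content)
        return True

    # e.g. the CONFIG_HAS_DTS=n case reduces to a one-line header:
    filechk('generated_dts_board.h',
            lambda: '/* WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY! */\n')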
diff --git a/arch/arm/core/cortex_m/Kconfig b/arch/arm/core/cortex_m/Kconfig
index df8ab118198..4fcdd7d7774 100644
--- a/arch/arm/core/cortex_m/Kconfig
+++ b/arch/arm/core/cortex_m/Kconfig
@@ -170,6 +170,7 @@ config XIP
 
 config SRAM_SIZE
 	int "SRAM Size in kB"
+	depends on !HAS_DTS
 	help
 	This option specifies the size of the SRAM in kB.  It is normally set by
 	the board's defconfig file and the user should generally avoid modifying
@@ -177,6 +178,7 @@ config SRAM_SIZE
 
 config SRAM_BASE_ADDRESS
 	hex "SRAM Base Address"
+	depends on !HAS_DTS
 	help
 	This option specifies the base address of the SRAM on the board.  It is
 	normally set by the board's defconfig file and the user should generally
@@ -184,6 +186,7 @@ config SRAM_BASE_ADDRESS
 
 config FLASH_SIZE
 	int "Flash Size in kB"
+	depends on !HAS_DTS
 	help
 	This option specifies the size of the flash in kB.  It is normally set by
 	the board's defconfig file and the user should generally avoid modifying
@@ -191,6 +194,7 @@ config FLASH_SIZE
 
 config FLASH_BASE_ADDRESS
 	hex "Flash Base Address"
+	depends on !HAS_DTS
 	help
 	This option specifies the base address of the flash on the board.  It is
 	normally set by the board's defconfig file and the user should generally
diff --git a/arch/arm/core/cortex_m/irq_vector_table.c b/arch/arm/core/cortex_m/irq_vector_table.c
index 25c3e1e8d3d..e35aa791bb4 100644
--- a/arch/arm/core/cortex_m/irq_vector_table.c
+++ b/arch/arm/core/cortex_m/irq_vector_table.c
@@ -21,6 +21,7 @@
 #include
 #include
+#include <generated_dts_board.h>
 
 extern void _isr_wrapper(void);
 
 typedef void (*vth)(void); /* Vector Table Handler */
diff --git a/arch/arm/core/cortex_m/prep_c.c b/arch/arm/core/cortex_m/prep_c.c
index 19c31d5063d..e7a7d1f0643 100644
--- a/arch/arm/core/cortex_m/prep_c.c
+++ b/arch/arm/core/cortex_m/prep_c.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <generated_dts_board.h>
 
 #ifdef CONFIG_ARMV6_M
 static inline void relocate_vector_table(void) { /* do nothing */ }
diff --git a/arch/arm/core/cortex_m/reset.S b/arch/arm/core/cortex_m/reset.S
index d586d2c799b..a6b2bd2736e 100644
--- a/arch/arm/core/cortex_m/reset.S
+++ b/arch/arm/core/cortex_m/reset.S
@@ -17,6 +17,7 @@
 #include
 #include
 #include
 #include "vector_table.h"
+#include <generated_dts_board.h>
 
 _ASM_FILE_PROLOGUE
diff --git a/dts/arm/Makefile b/dts/arm/Makefile
new file mode 100644
index 00000000000..60d911e4227
--- /dev/null
+++ b/dts/arm/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(CONFIG_HAS_DTS),y)
+always := $(dtb-y)
+endif
diff --git a/include/arch/arm/cortex_m/scripts/linker.ld b/include/arch/arm/cortex_m/scripts/linker.ld
index ffe10e1ca42..ccf6fc25045 100644
--- a/include/arch/arm/cortex_m/scripts/linker.ld
+++ b/include/arch/arm/cortex_m/scripts/linker.ld
@@ -16,6 +16,7 @@
 
 #include
 #include
+#include <generated_dts_board.h>
 #include
 #include
diff --git a/include/sw_isr_table.h b/include/sw_isr_table.h
index 48fefb1476f..51239c05b38 100644
--- a/include/sw_isr_table.h
+++ b/include/sw_isr_table.h
@@ -15,6 +15,7 @@
 #define _SW_ISR_TABLE__H_
 
 #include
+#include <generated_dts_board.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/kernel/Kconfig b/kernel/Kconfig
index e94d8e53134..ea5f75eebc3 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -8,6 +8,13 @@
 
 menu "General Kernel Options"
 
+
+config HAS_DTS
+	bool "Uses Device Tree"
+	help
+	This option specifies that the target platform supports device tree
+	configuration.
+
 config MULTITHREADING
 	bool
 	prompt "Multi-threading"
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 29aa73d5c8a..5039525d1b3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -153,6 +153,8 @@ cpp_flags      = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(ZEPHYRINCLUDE) \
 
 ld_flags       = $(LDFLAGS) $(ldflags-y)
 
 dtc_cpp_flags  = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
+		 $(ZEPHYRINCLUDE) \
+		 -I$(srctree)/dts/common \
 		 -I$(srctree)/drivers/of/testcase-data \
 		 -undef -D__DTS__
 
@@ -276,9 +278,10 @@ cmd_dt_S_dtb= \
 $(obj)/%.dtb.S: $(obj)/%.dtb
 	$(call cmd,dt_S_dtb)
 
+
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
-	$(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
+	$(DTC) -O dts -o $@ -b 0 \
 		-i $(dir $<) $(DTC_FLAGS) \
 		-d $(depfile).dtc.tmp $(dtc-tmp) ; \
 	cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
@@ -286,6 +289,9 @@ cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
 $(obj)/%.dtb: $(src)/%.dts FORCE
 	$(call if_changed_dep,dtc)
 
+$(obj)/%.dts_compiled: $(src)/%.dts FORCE
+	$(call if_changed_dep,dtc)
+
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
 # Bzip2
diff --git a/scripts/Makefile.toolchain.zephyr b/scripts/Makefile.toolchain.zephyr
index bc60b8922aa..f5796e3c653 100644
--- a/scripts/Makefile.toolchain.zephyr
+++ b/scripts/Makefile.toolchain.zephyr
@@ -109,7 +109,7 @@ QEMU_BIOS=$(TOOLCHAIN_HOME)/usr/share/qemu
 
 TOOLCHAIN_LIBS = gcc
 
-DTC ?= ${TOOLCHAIN_HOME}/usr/bin/dtc
+DTC = ${TOOLCHAIN_HOME}/usr/bin/dtc
 
 OPENOCD ?= ${TOOLCHAIN_HOME}/usr/bin/openocd
 OPENOCD_DEFAULT_PATH ?= ${TOOLCHAIN_HOME}/usr/share/openocd/scripts
diff --git a/scripts/extract_dts_includes.py b/scripts/extract_dts_includes.py
new file mode 100755
index 00000000000..bd291ff4d3f
--- /dev/null
+++ b/scripts/extract_dts_includes.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+import sys
+from os import walk
+import os
+import re
+import yaml
+import pprint
+
+from devicetree import parse_file
+
+# globals
+compatibles = {}
+phandles = {}
+aliases = {}
+chosen = {}
+reduced = {}
+
+def convert_string_to_label(s):
+    # Transmute ',' and '-' to '_'
+    s = s.replace("-", "_")
+    s = s.replace(",", "_")
+    return s
+
+def get_all_compatibles(d, name, comp_dict):
+    if 'props' in d:
+        compat = d['props'].get('compatible')
+        enabled = d['props'].get('status')
+
+    if enabled == "disabled":
+        return comp_dict
+
+    if compat != None:
+        comp_dict[name] = compat
+
+    if name != '/':
+        name += '/'
+
+    if isinstance(d, dict):
+        if d['children']:
+            for k, v in d['children'].items():
+                get_all_compatibles(v, name + k, comp_dict)
+
+    return comp_dict
+
+def get_aliases(root):
+    if 'children' in root:
+        if 'aliases' in root['children']:
+            for k, v in root['children']['aliases']['props'].items():
+                aliases[v] = k
+
+    return
+
+def get_compat(node):
+    compat = None
+
+    if 'props' in node:
+        compat = node['props'].get('compatible')
+
+    if isinstance(compat, list):
+        compat = compat[0]
+
+    return compat
+
+def get_chosen(root):
+    if 'children' in root:
+        if 'chosen' in root['children']:
+            for k, v in root['children']['chosen']['props'].items():
+                chosen[k] = v
+
+    return
+
+def get_phandles(root, name, handles):
+    if 'props' in root:
+        handle = root['props'].get('phandle')
+        enabled = root['props'].get('status')
+
+    if enabled == "disabled":
+        return
+
+    if handle != None:
+        phandles[handle] = name
+
+    if name != '/':
+        name += '/'
+
+    if isinstance(root, dict):
+        if root['children']:
+            for k, v in root['children'].items():
+                get_phandles(v, name + k, handles)
+
+    return
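For orientation: all of the helpers above walk the dict shape produced by devicetree.parse_file, where every node is {'props': ..., 'children': ...}. A hypothetical two-node tree, run against the functions just defined (for example pasted at the bottom of the script while debugging; node names and values are made up):

    tree = {
        'props': {'#address-cells': 1, '#size-cells': 1},
        'children': {
            'soc': {
                'props': {},
                'children': {
                    'uart@40004400': {
                        'props': {'compatible': 'st,stm32-usart',
                                  'status': 'ok',
                                  'phandle': 1},
                        'children': {},
                    },
                },
            },
        },
    }

    print(get_all_compatibles(tree, '/', {}))
    # {'/soc/uart@40004400': 'st,stm32-usart'}
    get_phandles(tree, '/', {})       # fills the module-level 'phandles'
    print(phandles)
    # {1: '/soc/uart@40004400'}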
+
+class Loader(yaml.Loader):
+    def __init__(self, stream):
+        self._root = os.path.realpath(stream.name)
+        super(Loader, self).__init__(stream)
+        Loader.add_constructor('!include', Loader.include)
+        Loader.add_constructor('!import', Loader.include)
+
+    def include(self, node):
+        if isinstance(node, yaml.ScalarNode):
+            return self.extractFile(self.construct_scalar(node))
+
+        elif isinstance(node, yaml.SequenceNode):
+            result = []
+            for filename in self.construct_sequence(node):
+                result += self.extractFile(filename)
+            return result
+
+        elif isinstance(node, yaml.MappingNode):
+            result = {}
+            for k, v in self.construct_mapping(node).items():
+                result[k] = self.extractFile(v)
+            return result
+
+        else:
+            print("Error: unrecognised node type in !include statement")
+            raise yaml.constructor.ConstructorError
+
+    def extractFile(self, filename):
+        filepath = os.path.join(os.path.dirname(self._root), filename)
+        if not os.path.isfile(filepath):
+            # we need to look in the common directory:
+            # take the path, back up 2 directories and tack on '/common/yaml'
+            filepath = os.path.dirname(self._root).split('/')
+            filepath = '/'.join(filepath[:-2])
+            filepath = os.path.join(filepath + '/common/yaml', filename)
+        with open(filepath, 'r') as f:
+            return yaml.load(f, Loader)
+
+def insert_defs(node_address, defs, new_defs, new_aliases):
+    if node_address in defs:
+        if 'aliases' in defs[node_address]:
+            defs[node_address]['aliases'].update(new_aliases)
+        else:
+            defs[node_address]['aliases'] = new_aliases
+
+        defs[node_address].update(new_defs)
+    else:
+        new_defs['aliases'] = new_aliases
+        defs[node_address] = new_defs
+
+    return
+
+def find_node_by_path(nodes, path):
+    d = nodes
+    for k in path[1:].split('/'):
+        d = d['children'][k]
+
+    return d
+
+def compress_nodes(nodes, path):
+    if 'props' in nodes:
+        status = nodes['props'].get('status')
+
+        if status == "disabled":
+            return
+
+    if isinstance(nodes, dict):
+        reduced[path] = dict(nodes)
+        reduced[path].pop('children', None)
+        if path != '/':
+            path += '/'
+        if nodes['children']:
+            for k, v in nodes['children'].items():
+                compress_nodes(v, path + k)
+
+    return
+
+def find_parent_irq_node(node_address):
+    address = ''
+
+    for comp in node_address.split('/')[1:]:
+        address += '/' + comp
+        if 'interrupt-parent' in reduced[address]['props']:
+            interrupt_parent = reduced[address]['props'].get('interrupt-parent')
+
+    return reduced[phandles[interrupt_parent]]
+
+def extract_interrupts(node_address, yaml, y_key, names, defs, def_label):
+    node = reduced[node_address]
+
+    try:
+        props = list(node['props'].get(y_key))
+    except:
+        props = [node['props'].get(y_key)]
+
+    irq_parent = find_parent_irq_node(node_address)
+
+    l_base = def_label.split('/')
+    index = 0
+
+    while props:
+        prop_def = {}
+        prop_alias = {}
+        l_idx = [str(index)]
+
+        if y_key == 'interrupts-extended':
+            cell_parent = reduced[phandles[props.pop(0)]]
+            name = []
+        else:
+            try:
+                name = [names.pop(0).upper()]
+            except:
+                name = []
+
+            cell_parent = irq_parent
+
+        cell_yaml = yaml[get_compat(cell_parent)]
+        l_cell_prefix = [yaml[get_compat(irq_parent)].get('cell_prefix', []).upper()]
+
+        for i in range(cell_parent['props']['#interrupt-cells']):
+            l_cell_name = [cell_yaml['#cells'][i].upper()]
+            if l_cell_name == l_cell_prefix:
+                l_cell_name = []
+
+            l_fqn = '_'.join(l_base + l_cell_prefix + l_idx)
+            prop_def[l_fqn] = props.pop(0)
+            if len(name):
+                prop_alias['_'.join(l_base + name + l_cell_prefix)] = l_fqn
+
+        index += 1
+        insert_defs(node_address, defs, prop_def, prop_alias)
+
+    return
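The custom Loader above is what lets the per-SoC YAML bindings pull shared fragments in via !include. A self-contained illustration with two throwaway files (file names and contents hypothetical, not part of the patch):

    with open('uart-base.yaml', 'w') as f:
        f.write('properties: [reg, interrupts]\n')
    with open('board-uart.yaml', 'w') as f:
        f.write('inherits: !include uart-base.yaml\n')

    # The scalar node after !include is resolved relative to the
    # including file, and its contents are spliced in:
    with open('board-uart.yaml') as f:
        print(yaml.load(f, Loader))
    # {'inherits': {'properties': ['reg', 'interrupts']}}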
+
+def extract_reg_prop(node_address, names, defs, def_label, div):
+    node = reduced[node_address]
+
+    props = list(reduced[node_address]['props']['reg'])
+
+    address_cells = reduced['/']['props'].get('#address-cells')
+    size_cells = reduced['/']['props'].get('#size-cells')
+    address = ''
+    for comp in node_address.split('/')[1:]:
+        address += '/' + comp
+        address_cells = reduced[address]['props'].get('#address-cells', address_cells)
+        size_cells = reduced[address]['props'].get('#size-cells', size_cells)
+
+    index = 0
+    l_base = def_label.split('/')
+    l_addr = ["BASE_ADDRESS"]
+    l_size = ["SIZE"]
+
+    while props:
+        prop_def = {}
+        prop_alias = {}
+        addr = 0
+        size = 0
+        l_idx = [str(index)]
+
+        try:
+            name = [names.pop(0).upper()]
+        except:
+            name = []
+
+        for x in range(address_cells):
+            addr += props.pop(0) << (32 * x)
+        for x in range(size_cells):
+            size += props.pop(0) << (32 * x)
+
+        l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
+        l_size_fqn = '_'.join(l_base + l_size + l_idx)
+        prop_def[l_addr_fqn] = hex(addr)
+        prop_def[l_size_fqn] = int(size / div)
+        if len(name):
+            prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
+            prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn
+
+        if index == 0:
+            prop_alias['_'.join(l_base + l_addr)] = l_addr_fqn
+            prop_alias['_'.join(l_base + l_size)] = l_size_fqn
+
+        insert_defs(node_address, defs, prop_def, prop_alias)
+
+        # increment index for definition creation
+        index += 1
+
+    return
+
+def extract_cells(node_address, yaml, y_key, names, index, prefix, defs, def_label):
+    try:
+        props = list(reduced[node_address]['props'].get(y_key))
+    except:
+        props = [reduced[node_address]['props'].get(y_key)]
+
+    cell_parent = reduced[phandles[props.pop(0)]]
+
+    try:
+        cell_yaml = yaml[get_compat(cell_parent)]
+    except:
+        raise Exception("Could not find yaml description for " +
+                        cell_parent['name'])
+
+    try:
+        name = names.pop(0).upper()
+    except:
+        name = []
+
+    l_cell = [str(cell_yaml.get('cell_string', ''))]
+    l_base = def_label.split('/')
+    l_base += prefix
+    l_idx = [str(index)]
+
+    prop_def = {}
+    prop_alias = {}
+
+    for k in cell_parent['props'].keys():
+        if k[0] == '#' and '-cells' in k:
+            for i in range(cell_parent['props'].get(k)):
+                l_cellname = [str(cell_yaml['#cells'][i]).upper()]
+                if l_cell == l_cellname:
+                    label = l_base + l_cell + l_idx
+                else:
+                    label = l_base + l_cell + l_cellname + l_idx
+                label_name = l_base + name + l_cellname
+                prop_def['_'.join(label)] = props.pop(0)
+                if len(name):
+                    prop_alias['_'.join(label_name)] = '_'.join(label)
+
+                if index == 0:
+                    prop_alias['_'.join(label[:-1])] = '_'.join(label)
+
+    insert_defs(node_address, defs, prop_def, prop_alias)
+
+    # recurse if we have anything left
+    if len(props):
+        extract_cells(node_address, yaml, y_key, names, index + 1,
+                      prefix, defs, def_label)
+
+    return
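A worked example of the cell arithmetic in extract_reg_prop, with hypothetical values: one address cell and one size cell, a flash node with reg = <0x08000000 0x20000>, extracted with div = 1024 (kB):

    props = [0x08000000, 0x20000]    # reg = <0x08000000 0x20000>;
    addr = 0
    size = 0
    for x in range(1):               # address_cells = 1
        addr += props.pop(0) << (32 * x)
    for x in range(1):               # size_cells = 1
        size += props.pop(0) << (32 * x)
    print(hex(addr), int(size / 1024))
    # 0x8000000 128  ->  CONFIG_FLASH_BASE_ADDRESS / CONFIG_FLASH_SIZE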
+
+def extract_pinctrl(node_address, yaml, pinconf, names, index, defs, def_label):
+    prop_list = []
+    if not isinstance(pinconf, list):
+        prop_list.append(pinconf)
+    else:
+        prop_list = list(pinconf)
+
+    def_prefix = def_label.split('_')
+    target_node = node_address
+
+    prop_def = {}
+    for p in prop_list:
+        pin_node_address = phandles[p]
+        pin_entry = reduced[pin_node_address]
+        parent_address = '/'.join(pin_node_address.split('/')[:-1])
+        pin_parent = reduced[parent_address]
+        cell_yaml = yaml[get_compat(pin_parent)]
+        cell_prefix = cell_yaml.get('cell_string', None)
+        post_fix = []
+
+        if cell_prefix != None:
+            post_fix.append(cell_prefix)
+
+        for subnode in reduced.keys():
+            if pin_node_address in subnode and pin_node_address != subnode:
+                # found a subnode underneath the pinmux handle
+                node_label = subnode.split('/')[-2:]
+                pin_label = def_prefix + post_fix + subnode.split('/')[-2:]
+
+                for i, pin in enumerate(reduced[subnode]['props']['pins']):
+                    key_label = list(pin_label) + [cell_yaml['#cells'][0]] + [str(i)]
+                    func_label = key_label[:-2] + [cell_yaml['#cells'][1]] + [str(i)]
+                    key_label = convert_string_to_label('_'.join(key_label)).upper()
+                    func_label = convert_string_to_label('_'.join(func_label)).upper()
+
+                    prop_def[key_label] = pin
+                    prop_def[func_label] = reduced[subnode]['props']['function']
+
+    insert_defs(node_address, defs, prop_def, {})
+
+def extract_single(node_address, yaml, prop, key, prefix, defs, def_label):
+    prop_def = {}
+
+    if isinstance(prop, list):
+        for i, p in enumerate(prop):
+            k = convert_string_to_label(key).upper()
+            label = def_label + '_' + k
+            prop_def[label + '_' + str(i)] = p
+    else:
+        k = convert_string_to_label(key).upper()
+        label = def_label + '_' + k
+        prop_def[label] = prop
+
+    if node_address in defs:
+        defs[node_address].update(prop_def)
+    else:
+        defs[node_address] = prop_def
+
+    return
+
+def extract_property(yaml, node_address, y_key, y_val, names, prefix, defs):
+    node = reduced[node_address]
+    def_label = convert_string_to_label(get_compat(node)).upper()
+    def_label += '_' + node_address.split('@')[-1].upper()
+
+    if y_key == 'reg':
+        extract_reg_prop(node_address, names, defs, def_label, 1)
+    elif y_key == 'interrupts' or y_key == 'interrupts-extended':
+        extract_interrupts(node_address, yaml, y_key, names, defs, def_label)
+    elif 'pinctrl-' in y_key:
+        p_index = int(y_key.split('-')[1])
+        extract_pinctrl(node_address, yaml, reduced[node_address]['props'][y_key],
+                        names[p_index], p_index, defs, def_label)
+    elif 'clocks' in y_key:
+        extract_cells(node_address, yaml, y_key,
+                      names, 0, prefix, defs, def_label)
+    else:
+        extract_single(node_address, yaml[get_compat(reduced[node_address])],
+                       reduced[node_address]['props'][y_key], y_key,
+                       prefix, defs, def_label)
+
+    return
+
+def extract_node_include_info(reduced, node_address, yaml, defs, structs):
+    node = reduced[node_address]
+    node_compat = get_compat(node)
+
+    if node_compat not in yaml.keys():
+        return {}, {}
+
+    y_node = yaml[node_compat]
+
+    # check to see if we need to process the properties
+    for yp in y_node['properties']:
+        for k, v in yp.items():
+            if 'generation' in v:
+                if v['generation'] == 'define':
+                    label = v.get('define_string')
+                    storage = defs
+                else:
+                    label = v.get('structures_string')
+                    storage = structs
+
+                prefix = []
+                if v.get('use-name-prefix') != None:
+                    prefix = [convert_string_to_label(k.upper())]
+
+                for c in node['props'].keys():
+                    if c.endswith("-names"):
+                        pass
+
+                    if re.match(k + '$', c):
+                        if 'pinctrl-' in c:
+                            names = node['props'].get('pinctrl-names', [])
+                        else:
+                            names = node['props'].get(c[:-1] + '-names', [])
+                            if not names:
+                                names = node['props'].get(c + '-names', [])
+
+                        if not isinstance(names, list):
+                            names = [names]
+
+                        extract_property(yaml, node_address, c, v, names, prefix, defs)
+
+    return
+
+def yaml_collapse(yaml_list):
+    collapsed = dict(yaml_list)
+
+    for k, v in collapsed.items():
+        props = set()
+        if 'properties' in v:
+            for entry in v['properties']:
+                for key in entry:
+                    props.add(key)
+
+        if 'inherits' in v:
+            for inherited in v['inherits']:
+                for prop in inherited['properties']:
+                    for key in prop:
+                        if key not in props:
+                            v['properties'].append(prop)
+            v.pop('inherits')
+
+    return collapsed
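To see what yaml_collapse does with the !include/inherits data, a hypothetical binding where a device-specific file inherits a base binding: properties the child already defines are kept, the rest of the inherited ones are appended.

    yaml_list = {
        'st,stm32-usart': {
            'properties': [{'baud-rate': {'generation': 'define'}}],
            'inherits': [{'properties': [
                {'reg': {'generation': 'define'}},
                {'baud-rate': {'generation': 'define'}},
            ]}],
        },
    }
    collapsed = yaml_collapse(yaml_list)
    print([list(p)[0] for p in collapsed['st,stm32-usart']['properties']])
    # ['baud-rate', 'reg']  -- inherited 'reg' added, duplicate dropped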
+
+def print_key_value(k, v, tabstop):
+    label = "#define " + k
+
+    # calculate the name's tabs
+    if len(label) % 8:
+        tabs = (len(label) + 7) >> 3
+    else:
+        tabs = (len(label) >> 3) + 1
+
+    sys.stdout.write(label)
+    for i in range(0, tabstop - tabs + 1):
+        sys.stdout.write('\t')
+    sys.stdout.write(str(v))
+    sys.stdout.write("\n")
+
+    return
+
+def generate_include_file(defs):
+    compatible = reduced['/']['props']['compatible'][0]
+
+    sys.stdout.write("/**************************************************\n")
+    sys.stdout.write(" * Generated include file for " + compatible)
+    sys.stdout.write("\n")
+    sys.stdout.write(" * DO NOT MODIFY\n")
+    sys.stdout.write(" */\n")
+    sys.stdout.write("\n")
+    sys.stdout.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n")
+    sys.stdout.write("#define _DEVICE_TREE_BOARD_H" + "\n")
+    sys.stdout.write("\n")
+
+    node_keys = sorted(defs.keys())
+    for node in node_keys:
+        sys.stdout.write('/* ' + node.split('/')[-1] + ' */')
+        sys.stdout.write("\n")
+
+        maxlength = max(len(s + '#define ') for s in defs[node].keys())
+        if maxlength % 8:
+            maxtabstop = (maxlength + 7) >> 3
+        else:
+            maxtabstop = (maxlength >> 3) + 1
+
+        if (maxtabstop * 8 - maxlength) <= 2:
+            maxtabstop += 1
+
+        prop_keys = sorted(defs[node].keys())
+        for prop in prop_keys:
+            if prop == 'aliases':
+                for entry in sorted(defs[node][prop]):
+                    print_key_value(entry, defs[node][prop].get(entry),
+                                    maxtabstop)
+            else:
+                print_key_value(prop, defs[node].get(prop), maxtabstop)
+        sys.stdout.write("\n")
+
+    sys.stdout.write("#endif\n")
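The tab-stop arithmetic above pads every define out to a common column. A quick check against the function as written, with a hypothetical key and tab stop:

    print_key_value('CONFIG_SRAM_SIZE', 16, 5)
    # emits: "#define CONFIG_SRAM_SIZE\t\t16"
    # len("#define CONFIG_SRAM_SIZE") == 24 -> tabs = 24/8 + 1 = 4,
    # so 5 - 4 + 1 = 2 tab characters pad the value to the fifth tab stop.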
Check YAML sources") + + # collapse the yaml inherited information + yaml_list = yaml_collapse(yaml_list) + + # load zephyr specific nodes + flash = {} + console = {} + sram = {} + if 'zephyr,flash' in chosen: + flash = reduced[chosen['zephyr,flash']] + if 'zephyr,console' in chosen: + console = reduced[chosen['zephyr,console']] + if 'zephyr,sram' in chosen: + sram = reduced[chosen['zephyr,sram']] + + defs = {} + structs = {} + for k, v in reduced.items(): + node_compat = get_compat(v) + if node_compat != None and node_compat in yaml_list: + extract_node_include_info(reduced, k, yaml_list, defs, structs) + + if defs == {}: + raise Exception("No information parsed from dts file.") + + if flash: + extract_reg_prop(chosen['zephyr,flash'], None, defs, "CONFIG_FLASH", 1024) + else: + # We will add address and size of 0 for systems with no flash controller + # This is what they already do in the Kconfig options anyway + defs['dummy-flash'] = { 'CONFIG_FLASH_BASE_ADDRESS': 0, 'CONFIG_FLASH_SIZE': 0 } + + if sram: + extract_reg_prop(chosen['zephyr,sram'], None, defs, "CONFIG_SRAM", 1024) + + # generate include file + generate_include_file(defs) + +if __name__ == '__main__': + # test1.py executed as script + # do something + sys.exit(main(sys.argv))