diff --git a/config.mk b/config.mk index 9304552..a6a4ecd 100644 --- a/config.mk +++ b/config.mk @@ -49,6 +49,8 @@ $(AUTOCONF_HW_OUTPUTS_PF) ${AUTOCONF_SW_OUTPUTS_PF} ${AUTOCONF_OS_OUTPUTS_PF} $( # Call from other (application ) makefiles without knowing the files autoconf depends on loki-configure-hw: ${AUTOCONF_HW_OUTPUTS_PF} ${AUTOCONF_OUTPUTS_PF} + # Also force-create the symlinks that external Makefiles may rely on. + $(MAKE) -C ${LOKI_DIR}/design/ .platform_configured loki-configure-sw: ${AUTOCONF_SW_OUTPUTS_PF} ${AUTOCONF_OUTPUTS_PF} diff --git a/control/loki/adapter.py b/control/loki/adapter.py index 88bcdc6..6dac34c 100644 --- a/control/loki/adapter.py +++ b/control/loki/adapter.py @@ -18,6 +18,8 @@ import logging import gpiod import time +import datetime +import psutil import os import concurrent.futures as futures import threading @@ -268,6 +270,8 @@ class LokiCarrier(ABC): def __init__(self, **kwargs): # Get system information + self._logger = logging.getLogger('LokiCarrier') + try: with open('/etc/loki/version') as info: self.__lokiinfo_version = info.read() @@ -292,7 +296,11 @@ def __init__(self, **kwargs): except FileNotFoundError: self.__lokiinfo_application_name = 'unknown' - self._logger = logging.getLogger('LokiCarrier') + try: + self.__lokiinfo_odin_version = os.popen('odin_control --version').read().split('\n')[0] + except Exception as e: + self.__lokiinfo_odin_version = 'unknown' + self._logger.error('Failed to get odin server version: {}'.format(e)) self._supported_extensions = [] self._change_callbacks = {} @@ -364,6 +372,7 @@ def _start_io_loops(self, options): self._threads['gpio'] = self._thread_executor.submit(self._loop_gpiosync) self._threads['ams'] = self._thread_executor.submit(self._loop_ams) + self._threads['perf'] = self._thread_executor.submit(self._loop_performance, options) atexit.register(self._terminate_loops) @@ -418,8 +427,18 @@ def set_pin_options(friendly_name, pin_id, is_input, active_low, default_value=F def 
_gen_paramtree_dict(self): + self._zynq_perf_mem_cached = {} + self._zynq_perf_uptime_str = "" + self._zynq_perf_net_addr = "" + self._zynq_perf_net_speed = "" + self._zynq_disk_usage = {} + self._zynq_perf_cpu_load = (None,None,None) + self._zynq_perf_cpu_perc = "" + self._zynq_perf_cpu_times = {} + base_tree_dict = { 'carrier_info': { + 'odin_control_version': (lambda: self.__lokiinfo_odin_version, None, {"description": "odin-control version"}), 'version': (lambda: self.__lokiinfo_version, None, {"description": "LOKI system image repo tag"}), 'application_version': (lambda: self.__lokiinfo_application_version, None, {"description": "Application version"}), 'application_name': (lambda: self.__lokiinfo_application_name, None, {"description": "Application name"}), @@ -428,6 +447,25 @@ def _gen_paramtree_dict(self): 'extensions': (self.get_avail_extensions, None, {"description": "Comma separated list of carrier's supported extensions"}), 'application_interfaces': self._get_paramtree_interfaces_dict(), 'loopstatus': (self.get_loop_status, None, {"description": "Reports on the state of the background loops"}), + 'performance': { + 'mem': { + 'free': (lambda: self._zynq_perf_mem_cached.get('free'), None), + 'avail': (lambda: self._zynq_perf_mem_cached.get('avail'), None), + 'total': (lambda: self._zynq_perf_mem_cached.get('total'), None), + 'cached': (lambda: self._zynq_perf_mem_cached.get('cached'), None), + }, + 'uptime': (lambda: self._zynq_perf_uptime_str, None), + 'net': { + 'address': (lambda: self._zynq_perf_net_addr, None), + 'speed': (lambda: self._zynq_perf_net_speed, None), + }, + 'disk_used_perc': (lambda: self._zynq_disk_usage, None), + 'cpu': { + 'load': (lambda: self._zynq_perf_cpu_load, None), + 'percent': (lambda: self._zynq_perf_cpu_perc, None), + 'times': (lambda: self._zynq_perf_cpu_times, None), + }, + }, }, 'control': { 'application_enable': (self.get_app_enabled, self.set_app_enabled, { @@ -523,6 +561,111 @@ def set(self, path, data): except 
AttributeError: raise ParameterTreeError + ######################################## + # Built-in Zynq Performance Monitoring # + ######################################## + + def _loop_performance(self, options): + + disk_info_directories = options.get('disk_info_directories', None) + if disk_info_directories is None: + # Use default list + disk_info_directories = [ + '/mnt/flashmtd1', + '/mnt/sd-mmcblk1p1', + '/opt/loki-detector/exports', + ] + else: + # Of overrides provided, convert to list + disk_info_directories = [x.strip() for x in disk_info_directories.split(',')] + + while not self.TERMINATE_THREADS: + time.sleep(5) + + self._sync_performance_meminfo() + self._sync_performance_uptime() + self._sync_performance_netinfo() + self._sync_performance_diskinfo(disk_info_directories) + self._sync_performance_cpuinfo() + + def _sync_performance_meminfo(self): + # Update cached memory-related performance values + try: + meminfo = psutil.virtual_memory() + self._zynq_perf_mem_cached['free'] = meminfo.free + self._zynq_perf_mem_cached['avail'] = meminfo.available + self._zynq_perf_mem_cached['total'] = meminfo.total + self._zynq_perf_mem_cached['cached'] = meminfo.cached + except Exception as e: + self._zynq_perf_mem_cached['free'] = None + self._zynq_perf_mem_cached['avail'] = None + self._zynq_perf_mem_cached['total'] = None + self._zynq_perf_mem_cached['cached'] = None + self._logger.error('Failed to retrieve memory performance values from psutil: {}'.format(e)) + + + def _sync_performance_uptime(self): + # Update cached uptime value + try: + self._zynq_perf_uptime_str = str(datetime.timedelta(seconds=int(time.time() - psutil.boot_time()))) + except Exception as e: + self._zynq_perf_uptime_str = None + self._logger.error('Failed to retrieve uptime value from psutil: {}'.format(e)) + + def _sync_performance_netinfo(self): + # Update cached network-related performance values + try: + self._zynq_perf_net_addr = psutil.net_if_addrs()['eth0'][0].address + except 
Exception as e: + self._zynq_perf_net_addr = None + self._logger.error('Failed to retrieve network address from psutil: {}'.format(e)) + + try: + self._zynq_perf_net_speed = psutil.net_if_stats()['eth0'].speed + except Exception as e: + self._zynq_perf_net_speed = None + self._logger.error('Failed to retrieve network speed from psutil: {}'.format(e)) + + def _sync_performance_diskinfo(self, directories): + # Update cached disk-related performance values + for directory in directories: + try: + self._zynq_disk_usage[directory] = psutil.disk_usage(directory).percent + except Exception as e: + self._zynq_disk_usage[directory] = None + self._logger.error('Failed to get disk usage for directory {}: {}'.format(directory, e)) + + def _sync_performance_cpuinfo(self): + # Update cached cpu-related performance values + try: + self._zynq_perf_cpu_load = psutil.getloadavg() + except Exception as e: + self._zynq_perf_cpu_load = None + self._logger.error('Failed to get CPU load info from psutil: {}'.format(e)) + + try: + self._zynq_perf_cpu_perc = psutil.cpu_percent() + except Exception as e: + self._zynq_perf_cpu_perc = None + self._logger.error('Failed to get CPU percent info from psutil: {}'.format(e)) + + try: + rawtimes = psutil.cpu_times_percent() + self._zynq_perf_cpu_times = {} + self._zynq_perf_cpu_times['user']= rawtimes.user + self._zynq_perf_cpu_times['nice']= rawtimes.nice + self._zynq_perf_cpu_times['system']= rawtimes.system + self._zynq_perf_cpu_times['idle']= rawtimes.idle + self._zynq_perf_cpu_times['iowait']= rawtimes.iowait + self._zynq_perf_cpu_times['irq']= rawtimes.irq + self._zynq_perf_cpu_times['softirq']= rawtimes.softirq + self._zynq_perf_cpu_times['steal']= rawtimes.steal + self._zynq_perf_cpu_times['guest']= rawtimes.guest + self._zynq_perf_cpu_times['guest_nice']= rawtimes.guest_nice + except Exception as e: + self._zynq_perf_cpu_times = None + self._logger.error('Failed to get CPU times info from psutil: {}'.format(e)) + 
######################################## # Built-in Zynq Temperature Monitoring # ######################################## diff --git a/control/loki/controllertest.py b/control/loki/controllertest.py new file mode 100644 index 0000000..6f88b90 --- /dev/null +++ b/control/loki/controllertest.py @@ -0,0 +1,45 @@ +from register_controller import RegisterController, Field, MultiField + +simulated_regs = { + 0: 0x1234, + 1: 0xBBBB, +} + +def outer_read(self, address, length): + outbuffer = [] + for i in range(length): + outbuffer.append(simulated_regs[address+i]) + print('SYSTEM READ ADDRESS {}, {} words: {}'.format(hex(address), length, [hex(x) for x in outbuffer])) + return outbuffer + +def outer_write(self, address, values): + for i in range(len(values)): + simulated_regs[address+i] = values[i] + print('SYSTEM WRITE ADDRESS {} with {}'.format(hex(address), [hex(x) for x in values])) + +con = RegisterController( + func_readreg = lambda address, length: outer_read(None, address, length), + func_writereg = lambda address, values: outer_write(None, address, values), + word_width_bits=16, +) + +con.add_register(0, 0) +con.add_register(1, 0) + +basicfield = Field(con, 'basic', 'basic field', 0x00, 7, 4) +basicfield2 = Field(con, 'basic2', 'basic field 2', 0x00, 3, 4) + +assert(con.get_field('basic').read() == 0x3) +assert(con.get_field('basic2').read() == 0x4) + +spanningfield = Field(con, 'spn', 'spanning field', 0x00, 3, 8) + +assert(con.get_field('spn').read() == 0x4b) + +con.get_field('basic').write(0xA) +assert(simulated_regs[0] & 0x00F0 == 0x00A0) +assert(con.get_field('basic').read() == 0xA) + +con.get_field('spn').write(0xBD) +assert(simulated_regs[0] & 0xF == 0xB and simulated_regs[1] & 0xF000 == 0xD000) +assert(con.get_field('spn').read() == 0xBD) diff --git a/control/loki/register_controller.py b/control/loki/register_controller.py new file mode 100644 index 0000000..55099e5 --- /dev/null +++ b/control/loki/register_controller.py @@ -0,0 +1,551 @@ +import 
threading +import math +import csv +import logging + +# tabulate module will give nicer results, but a rough table can be provided +try: + from tabulate import tabulate +except ModuleNotFoundError: + def tabulate(table, **kwargs): + table.insert(1, ['-' * 20] * len(table[0])) + rows = [('|').join([str(x)[:20].ljust(20) for x in tablerow]) for tablerow in table] + return ('\n').join(rows) + + +class RegisterCache (object): + def __init__(self, is_volatile, force_nocache=False): + self._is_volatile = is_volatile + self._value = None + self._force_nocache = force_nocache + + def valid(self): + return (self._is_volatile is False and + self._value is not None and + self._force_nocache is False) + + def get_value(self): + # Return None if not available + if self.valid(): + return self._value + else: + return None + + def set_value(self, value): + self._value = value + + def enable_cache(self): + self._force_nocache = False + + def disable_cache(self): + self._force_nocache = True + self._value = None + + def set_volatile(self): + self._is_volatile = True + + +class Field (object): + # A field is designed to hold a control / data value of grouped bits. + # It can span several registers as long as the bits run into each other at the boundary. + # Otherwise see MultiField. 
+ def __init__(self, controller, shortname, description, start_register_address, start_bit, length_bits, reversed_bits=False, is_subfield=False): + self._controller = controller + + self.shortname = shortname + self.description = description + self.start_address = start_register_address + self.start_bit = start_bit + self.length_bits = length_bits + self.reversed_bits = reversed_bits + self.is_subfield = is_subfield + + self._controller._add_field(self) + + def __repr__(self): + return '<Field {} (addr {}, startbit {}, len {})>'.format( + self.shortname, self.start_address, self.start_bit, self.length_bits + ) + + def __lt__(self, other): + # Operators are defined with respect to the location of the first bit + if self.start_address != other.start_address: + return self.start_address < other.start_address + else: + # Sorting via start bit is reversed, as MSB first + return self.start_bit > other.start_bit + + def __gt__(self, other): + # Operators are defined with respect to the location of the first bit + if self.start_address != other.start_address: + return self.start_address > other.start_address + else: + # Sorting via start bit is reversed, as MSB first + return self.start_bit < other.start_bit + + def get_description(self): + return self.description + + def _get_register_span(self): + # Work out how many registers this field will span + # overshoot is number of bits in the last register that are not part of this field. + word_width = self._controller.get_word_width() + + # Length of field plus bits before start bit in first register, i.e.
how far past + # the start of the first register we read + read_depth_bits = (word_width - (self.start_bit + 1)) + self.length_bits + #print('read depth', read_depth_bits) + + # Registers spanned is determined by how many bits we aim to read + registers_spanned = int((read_depth_bits-1) / word_width) + 1 + + overshoot = (word_width - ((self.length_bits - (self.start_bit + 1)))) % word_width + + #print('field starting at bit {} length {} spans {} registers with overshoot {}'.format( + # self.start_bit, self.length_bits, registers_spanned, overshoot + #)) + + return registers_spanned, overshoot + + def read(self): + # Read field value, potentially spanning multiple registers + + # Get all registers that contain parts of this field + registers_spanned, overshoot = self._get_register_span() + register_readback = self._controller.read_register(self.start_address, length=registers_spanned) + #print('register readback: {}'.format([hex(x) for x in register_readback])) + + # Combine register values into single value, MSB first + offset = self._controller.get_word_width() * (len(register_readback) - 1) + combined_register_buffer = 0 + for regval in register_readback: + combined_register_buffer |= (regval << offset) + offset -= self._controller.get_word_width() + #print('combined register buffer: {}'.format(hex(combined_register_buffer))) + + # Down shift so that field is now in LSBits + shifted_buffer = int(combined_register_buffer >> overshoot) + + # Mask off any other bits (higher than the field) + field_mask = int('1'*self.length_bits, 2) + field_buffer = shifted_buffer & field_mask + #print('field buffer: {}'.format(hex(field_buffer))) + + return field_buffer + + def write(self, value): + # Write field value, potentially spanning multiple registers. Read-modify-write. 
+ + if value >= math.pow(2, self.length_bits): + raise Exception('writing beyond field boundary, is {} bits'.format(self.length_bits)) + + # TODO need MUTEX PROTECTION HERE covering read and write + + # Get info for registers that contain parts of this field, read original values + registers_spanned, overshoot = self._get_register_span() + register_readback = self._controller.read_register(self.start_address, length=registers_spanned) + + # Combine register values into single value, MSB first + offset = self._controller.get_word_width() * (registers_spanned - 1) + combined_register_buffer = 0 + for regval in register_readback: + combined_register_buffer |= (regval << offset) + offset -= self._controller.get_word_width() + #print('combined register buffer:', hex(combined_register_buffer)) + + # Create a single value for the field, and shift it up to the correct offset for all registers + field_shifted = value << overshoot + + # Mask shifted field onto the register values + field_mask = int('1'*self.length_bits, 2) << overshoot + modified_register_buffer = (combined_register_buffer & (~field_mask)) | field_shifted + #print('modified register buffer:', hex(modified_register_buffer)) + + # Write register values back + work_mask = int('1'*self._controller.get_word_width(), 2) + offset = self._controller.get_word_width() * (registers_spanned - 1) + modified_register_buffer_list = [] + for i in range(registers_spanned): + regval = (modified_register_buffer >> (offset)) & work_mask + modified_register_buffer_list.append(regval) + + offset -= self._controller.get_word_width() + + # Write to registers + self._controller.write_register(self.start_address, modified_register_buffer_list) + + +class MultiField (Field): + # A field that is comprised of several sub-fields of bits in separate locations, treated as one entity. + # This can be used where a field is made up of the MSBs of register n and LSBs of register n+1. 
+ + def __init__(self, controller, shortname, description, fields_msbfirst: list, reversed_bits=False): + self._fields = fields_msbfirst + + full_length = 0 + for field in self._fields: + full_length += field.length_bits + field.is_subfield = True + + super(MultiField, self).__init__( + controller=controller, + shortname=shortname, + description=description, + start_register_address=self._fields[0].start_address, + start_bit=self._fields[0].start_bit, + length_bits=full_length, + reversed_bits=reversed_bits, + ) + + def __repr__(self): + return ''.format( + self.shortname, [fld.__repr__() for fld in self._fields] + ) + + def read(self): + # The output value will be assembled bitwise from other fields + assembly_buffer = int() + + # Loop through fields MSBit to LSBit + start_offset = self.length_bits + for field in self._fields: + # Read the individual field and insert into buffer + field_readval = field.read() + assembly_buffer = assembly_buffer | (field_readval << (start_offset-field.length_bits) ) + + start_offset -= field.length_bits + + return assembly_buffer + + def write(self, value): + # The written value will be split bitwise into multiple fields, written individually + # (inefficient, but probably the best way of doing it). + + # Loop through fields MSBit to LSBit + start_offset = self.length_bits + for field in self._fields: + # Retrieve the sub-field value from the entire value + field_mask = int('1'*field.length_bits, 2) + value_shifted = value >> (start_offset-field.length_bits) + value_masked = value_shifted & field_mask + + # Write the sub-field + field.write(value_masked) + + start_offset -= field.length_bits + + +class RegisterController (object): + def __init__(self, func_readreg, func_writereg, word_width_bits, cache_enabled=True): + # If cache_enabled is True, traffic will be minimised by returning known values + # of registers from last read/write. 
This takes into account volatility, + # so that registers can contain bits that are always read back directly. Setting + # this false will mean every read/write will interact with the bus, meaning more + # traffic. + + self._logger = logging.getLogger('REGISTER_CONTROLLER') + + # Assign direct SPI access registers, used internally only. Any chip specifics + # such as page switching for upper addresses shall be defined by the caller. + self._read_register_direct = func_readreg + self._write_register_direct = func_writereg + + self._word_width_bits = word_width_bits + + # Create lock used to protect register cache and SPI access with + # critical sections + self._register_mutex = threading.Lock() + #todo actually use this + + # Create register cache, which will be a dictionary of addressed registers, where + # address is not strongly typed (but must be compatible with direct functions). + self._register_cache = {} + self._cache_enabled = cache_enabled + + # Create the field dictionary, indexed by unique names. + # Cached field values rely on the register cache. 
+ self._fields = {} + + self.stats_reset() + + def get_word_width(self): + return self._word_width_bits + + def add_register(self, address, is_volatile): + register_cache = RegisterCache(bool(is_volatile), force_nocache=not(self._cache_enabled)) + self._register_cache.update({address: register_cache}) + + def _add_field(self, field: Field): + self._fields.update({field.shortname: field}) + + def _create_registers_for_field(self, field, is_volatile): + # Get the registers this field will operate on + register_span, overshoot = field._get_register_span() + required_registers = range(field.start_address, field.start_address + register_span) + + for register_address in required_registers: + # Add the registers if they do not exist + if self._register_cache.get(register_address) is None: + self._logger.info('register {} does not exist, creating...'.format(register_address)) + self.add_register(register_address, is_volatile) + + def add_field(self, shortname, description, start_register_address, start_bit, length_bits, reversed_bits=False, is_volatile=True): + # Fields are volatile by default so that they will always be read back if the + # caching is enabled. 
+ current_field = Field( + controller=self, + shortname=shortname, + description=description, + start_register_address=start_register_address, + start_bit=start_bit, + length_bits=length_bits, + reversed_bits=reversed_bits, + is_subfield=False, # This will be set by multifield + ) + + self._create_registers_for_field(current_field, is_volatile) + + return current_field + + def add_multifield(self, shortname, description, fields_msbfirst: list, reversed_bits=False): + current_field = MultiField( + controller=self, + shortname=shortname, + description=description, + fields_msbfirst=fields_msbfirst, + reversed_bits=reversed_bits + ) + + # Creating registers for a multifield is not necessary, as it will be done when + # creating sub-fields + + return current_field + + def read_register(self, start_address, length=1, direct_read=False): + # Return the cached value unless it is invalid, in which case SPI is used + # If direct_read is False, cache will always be ignored for this read + + # Attempt cache read + cached_values = [] + for address in range(start_address, start_address+length): + try: + regcache = self._register_cache[address].get_value() + cached_values.append(regcache) + except Exception as e: + # If failed to get any cached value, abort + cached_values.append(None) + + # If any registers had no valid cache, read the values directly + if None in cached_values or direct_read: + #print('Failed to use cache, reading directly') + try: + direct_read_values = self._read_register_direct(start_address, length) + except Exception as e: + # Failed to read the direct interface of the device + self._stats_record_failread() + self._logger.error('Attempted a direct read of register 0x{}, failed: {}'.format(hex(start_address), e)) + raise + + # Cache the values + for i in range(length): + self._register_cache[start_address+i].set_value(direct_read_values[i]) + + latest_values = direct_read_values + self._stats_record_directread() + self._logger.debug('Made a direct read to 
ASIC register 0x{}'.format(hex(start_address))) + else: + latest_values = cached_values + self._stats_record_cachedread() + + return latest_values + + def write_register(self, start_address, values): + # Write to the array as a whole register (single operation), and cache for later use + + # Write direct + try: + self._write_register_direct(start_address, values) + except Exception as e: + self._logger.error('Attempted a write of register 0x{}, failed: {}'.format(hex(start_address), e)) + raise + + # Cache the written values + for i in range(len(values)): + self._register_cache[start_address+i].set_value(values[i]) + + + def _stats_record_directread(self): + # Record a register read operation that used the direct interface + self._stats_direct += 1 + + def _stats_record_cachedread(self): + # Record a register read operation that used the cached value + self._stats_cached += 1 + + def stats_reset(self): + self._stats_direct = 0 + self._stats_cached = 0 + + def stats_cached_direct(self): + return (self._stats_cached, self._stats_direct) + + def _get_field(self, name): + return self._fields[name] + + def get_fields(self): + return list(self._fields.keys()) + + def read_field(self, fieldname): + return self._get_field(fieldname).read() + + def write_field(self, fieldname, value): + self._get_field(fieldname).write(value) + + + def summarise_fields(self, address_range=None, ignore_subfields=True, additional_decode=None): + # Pretty-print a table of field information for fields starting within the given + # register range. If address_range=None, list all registers. Subfields are ignored + # by default so that table prints out top-level understanding. + + # additional_decode allows the user to provide a function that will further decode + # field value for all fields in the range. This will add an additional column. 
+ # This function will be provided two arguments: , + + table = [['Name', 'Description', 'Start Address', 'Start Bit', 'Length', 'Value', 'Value (Hex)']] + + if additional_decode: + table[0].append('Decoded') + + for field in sorted(list(self._fields.values())): + if address_range is None or field.start_address in address_range: + if ignore_subfields and field.is_subfield: + continue + + # Attempt to read current value + try: + value = field.read() + except Exception as e: + value = 'ReadErr' + + newrow = [ + field.shortname, + field.description[:30], + field.start_address, + field.start_bit, + field.length_bits, + value, + hex(value), + ] + + if additional_decode: + if type(value) is int: + newrow.append(additional_decode(field.shortname, value)) + else: + newrow.append(value) # Read error + + table.append(newrow) + + return tabulate(table, headers='firstrow', tablefmt='fancy_grid') + + def enable_cache(self): + self._cache_enabled = True + + # Enable cache for each register + for address in self._register_cache.keys(): + self._register_cache[address].enable_cache() + + self._logger.warning('Cache Enabled') + + def disable_cache(self): + self._cache_enabled = False + + # Diable cache for each register, and reset the cached value + for address in self._register_cache.keys(): + self._register_cache[address].disable_cache() + + self.stats_reset() + + self._logger.warning('Cache Disabled') + + def cache_enabled(self): + return self._cache_enabled + + def clear_cache(self): + # To be called when it is known that the cache is invalid, and stored + # values should not be trusted. For example, on ASIC reset. + # Will leave cache enable state in whatever state it was in before this + # was called. + prev_enabled = self.cache_enabled() + self.disable_cache() + + if prev_enabled: + self.enable_cache() + + self._logger.warning('Cache Cleared') + + + def process_csv_fields(self, csv_filename): + # Take input CSV file for defining fields and registers. 
+ # Registers will be created automatically if they do not exist, and will be set + # volatile if any field is volatile. + + # For multifields, populate the subfields key. + + # CSV format: + # , , , , , , + # where is defined as: "shortname1, shortname2, shortname3" or "" + + + with open(csv_filename, newline='') as csvfile: + csvreader = csv.reader(csvfile, delimiter=',', quotechar='"') + for row in csvreader: + if len(row) == 0: + # blank line + continue + + try: + subfields = row[5].strip() + is_multifield = bool(len(subfields) > 0) + + if is_multifield: + # This is a MultiField + # We must assume that the subfields have been created already + # Therefore the registers will be handled + + required_subfields = [self._get_field(fldname.strip()) for fldname in subfields.split(',')] + + currentfield = self.add_multifield( + shortname=row[0], + description=row[1], + fields_msbfirst=required_subfields, + ) + + self._logger.debug('Added field {} comprised of {}'.format( + currentfield.shortname, + currentfield._fields, + )) + + else: + # This is a Field + + is_volatile = row[6].strip() == 'True' + + currentfield = self.add_field( + shortname=row[0], + description=row[1], + start_register_address=int(row[2]), + start_bit=int(row[3]), + length_bits=int(row[4]), + is_volatile=is_volatile, + ) + + self._logger.debug('Added {} field {} to register {}/{} len {}'.format( + 'volatile' if is_volatile else 'non-volatile', + currentfield.shortname, + currentfield.start_address, + currentfield.start_bit, + currentfield.length_bits, + )) + except Exception as e: + self._logger.error('Failed to decode CSV on row {}: {}'.format(row, e)) + raise (e) diff --git a/design/carrier_support/loki_1_0/constraints/te0803_generic/_l_babyd-constraints.xdc b/design/carrier_support/loki_1_0/constraints/te0803_generic/_l_babyd-constraints.xdc deleted file mode 100644 index 8b552d2..0000000 --- a/design/carrier_support/loki_1_0/constraints/te0803_generic/_l_babyd-constraints.xdc +++ /dev/null @@ 
-1,28 +0,0 @@ -# This file will add definitions to target the BabyD project for the TEBF0808 carrier board. - -# Certain pins have already been targeted as specific control pins in the LOKI control block. -# These include: -# - The I2C/SPI buses -# - LVDS SYNC (EMIO 17) -# - Temp INT (EMIO 4) -# - Temp nRST (EMIO 12) -# - ASIC nRST (EMIO 7) -# - VREG_EN (EMIO 6) - -# Any of the unused GPIO lines can be used for this without block design -# modification (range 21-31). The last three are used in preference -# because they were routed to unused SoM pins, and the others could -# prove useful. - -# Because the SPI and I2C should already have been configured to be routed out to FMC as with -# HEXITEC-MHz, for now there are no changes. This will change as more complex control is required. - -# Re-route ASIC reset signal to the BabyD specific line: main con G13 -> HP_G1_L6_N -> J1:107 -> A8 on TE0803 -set_property PACKAGE_PIN A8 [get_ports APP_NRST_lc7 ] - -# Route the second LVDS (EMIO18) to the BabyD Sync signal. Still requires jumpers. -set_property PACKAGE_PIN L3 [get_ports {GPIO_LVDS_P_17_20[1]} ] -set_property PACKAGE_PIN L2 [get_ports {GPIO_LVDS_N_17_20[1]} ] - -# Re-route FireFly nReset signal to the BabyD specific line: main con F22 -> HP_G1_L20_N -> J1:142 -> E2 on TE0803 -set_property PACKAGE_PIN E3 [get_ports {GPIO_APP_21_31[8]} ] diff --git a/os/petalinux-custom/project-spec/meta-user/classes/odin-control-instance.bbclass b/os/petalinux-custom/project-spec/meta-user/classes/odin-control-instance.bbclass index a455aec..ba3248c 100644 --- a/os/petalinux-custom/project-spec/meta-user/classes/odin-control-instance.bbclass +++ b/os/petalinux-custom/project-spec/meta-user/classes/odin-control-instance.bbclass @@ -1,6 +1,6 @@ # RDEPENDS specifies packages that are required at runtime on the host, as well as for build. 
RDEPENDS_${PN} += "python3-setuptools" -RDEPENDS_${PN} += "odin-control (= 1.3.0)" +RDEPENDS_${PN} += "odin-control (>=1.3.0)" RDEPENDS_${PN} += "odin-sequencer (=0.2.0)" RDEPENDS_${PN} += "odin-devices (=1.1.0)" RDEPENDS_${PN} += "python3-msgpack" diff --git a/os/petalinux-custom/project-spec/meta-user/recipes-apps/loki/files/loki-config.sh b/os/petalinux-custom/project-spec/meta-user/recipes-apps/loki/files/loki-config.sh index 459c9ca..7f326df 100644 --- a/os/petalinux-custom/project-spec/meta-user/recipes-apps/loki/files/loki-config.sh +++ b/os/petalinux-custom/project-spec/meta-user/recipes-apps/loki/files/loki-config.sh @@ -206,11 +206,22 @@ function service_start { function service_stop { echo "Stopping LOKI detector" - start-stop-daemon -K \ - -p $PIDFILE && \ - rm -rf $PIDFILE + if [ -f $PIDFILE ]; then + + start-stop-daemon -K \ + -p $PIDFILE + + while [ -d /proc/$(cat $PIDFILE) ]; do + sleep 1 + echo "waiting..." + done - echo "Service stopped" + rm -rf $PIDFILE + + echo "Service stopped" + else + echo "Service was not running" + fi } echo "Called with run argument ${1}" diff --git a/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-control/odin-control_1.5.0.bb b/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-control/odin-control_1.5.0.bb new file mode 100644 index 0000000..6b487d9 --- /dev/null +++ b/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-control/odin-control_1.5.0.bb @@ -0,0 +1,22 @@ +SUMMARY = "This is a recipe to build odin-control on PetaLinux" + +# RDEPENDS specifies packages that are required at runtime on the host, as well as for build. 
+RDEPENDS_${PN} += "python3-setuptools" +RDEPENDS_${PN} += "python3-tornado (<6.0)" +RDEPENDS_${PN} += "python3-fcntl" +RDEPENDS_${PN} += "python3-future" +RDEPENDS_${PN} += "python3-pyzmq (>=17.0)" +RDEPENDS_${PN} += "python3-psutil" + +SRC_URI = "git://github.com/odin-detector/odin-control.git;protocol=http;tag=${PV}" +SRC_URI[md5sum] = "1af5b49ffe84b3360b23086c7bb06a15" + +# This has to be in the format expected in Yocto's license list... +LICENSE = "Apachev2" +# Get this value by running md5sum on the license file +LIC_FILES_CHKSUM = "file://LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e" + + +inherit setuptools3 + +S = "${WORKDIR}/git/" diff --git a/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-loki-adapter/python-loki-adapter.bb b/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-loki-adapter/python-loki-adapter.bb index 01efdda..a5fe729 100644 --- a/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-loki-adapter/python-loki-adapter.bb +++ b/os/petalinux-custom/project-spec/meta-user/recipes-apps/odin-loki-adapter/python-loki-adapter.bb @@ -3,18 +3,14 @@ SECTION = "examples" LICENSE = "CLOSED" RDEPENDS_${PN} += "python3-setuptools" -RDEPENDS_${PN} += "odin-control (= 1.3.0)" -RDEPENDS_${PN} += "odin-devices (=1.1.0)" +RDEPENDS_${PN} += "odin-control (>=1.3.0)" +RDEPENDS_${PN} += "odin-devices (>=1.1.0)" SRC_URI = "file://setup.py \ file://loki/__init__.py \ - file://loki/adapter.py" + file://loki/adapter.py \ + file://loki/register_controller.py" S = "${WORKDIR}" inherit setuptools3 - -#do_install_append () { -# install -d ${D}${bindir} -# install -m 0755 lokiadapter/loki-adapter.py ${D}${bindir} -#}