diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..710acc3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+boards/ZCU111/pynq_sdfec/
+boards/ZCU111/bitstreams/
+
+.Xil
+vivado.jou
+vivado.log
+
+*.log
+
+*.idea
+*.ipynb_checkpoints
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..e09f3af
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019, Xilinx
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6f2aaaa
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,4 @@
+all: wheel
+
+wheel:
+ python3 setup.py bdist_wheel
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3c080b5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,57 @@
+# SDFEC-PYNQ
+
+This design offers an environment to evaluate the Soft Decision Forward Error
+Correction (SD-FEC) IPs using PYNQ and a ZCU111 board. Based on work by Andy Dow
+(Xilinx, Edinburgh), it allows us to play with a configurable data path
+including:
+
+ 1. A data source including BPSK, QPSK, QAM-16, and QAM-64 modulation schemes
+ 2. An encoding/decoding pair of SD-FEC blocks with a set of different LDPC
+ codes
+ 3. An AWGN channel model with configurable noise power
+![Notebook preview](boards/ZCU111/notebooks/assets/notebook_preview.png)
+
+## Getting started
+
+This repository is only compatible with the [PYNQ image v2.4.1](https://github.com/Xilinx/PYNQ/releases) for [ZCU111](https://www.xilinx.com/products/boards-and-kits/zcu111.html).
+
+We supply pre-built wheels with all tagged releases. These can be installed
+directly with pip.
+
+```sh
+pip3 install https://github.com/Xilinx/SDFEC-PYNQ/releases/download/v1.0_$BOARD/rfsoc_sdfec-1.0-py3-none-any.whl
+```
+
+The wheel is just a self-contained archive, so we must ask the module to copy
+its notebooks to the filesystem after installation.
+
+```sh
+python3 -c 'import rfsoc_sdfec; rfsoc_sdfec.install_notebooks()'
+```
+
+The notebook should now be available in the `rfsoc_sdfec/` folder of the
+Jupyter notebooks directory.
+
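+As a quick check, the overlay can also be driven directly from Python on the
+board. The snippet below is a minimal sketch based on the evaluation
+notebook's defaults; the exact parameter values are illustrative.
+
+```python
+from rfsoc_sdfec import SdFecOverlay, ModType
+
+ol = SdFecOverlay()
+stats = ol.run_block(
+    source_params=dict(mod_type=ModType.QPSK, zero_data=False, num_blocks=5000),
+    fec_params=dict(code_name='docsis_short', max_iter=8, term_on_pass=True),
+    channel_params=dict(snr=5.0, skip_chan=False),
+)
+print(stats['ber'], stats['dec_throughput'])
+```
+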
+## Building the wheel
+
+> NOTE: This is built on an x86 PC with Vivado, not on the board!
+
+We release pre-built wheels for every tagged release. If you want to build your
+own wheel, this can be done from a Linux PC with Python3 and Vivado 2018.3
+installed. Clone this repo and use make to build the wheel:
+
+```sh
+git clone https://github.com/Xilinx/SDFEC-PYNQ.git
+cd SDFEC-PYNQ
+BOARD=ZCU111 make wheel
+```
+
+The wheel is built in the `dist` folder.
+
+## License
+[BSD 3-Clause](https://github.com/Xilinx/SDFEC-PYNQ/blob/master/LICENSE)
diff --git a/boards/ZCU111/Makefile b/boards/ZCU111/Makefile
new file mode 100644
index 0000000..91fe54c
--- /dev/null
+++ b/boards/ZCU111/Makefile
@@ -0,0 +1,13 @@
+design_name := sdfec_pynq
+bitfile := bitstreams/sdfec_pynq.bit
+
+all: $(bitfile)
+
+$(bitfile):
+ # some bash magic to retrieve LDPC codes and get them in a format required by the demo
+ $(shell cat $(XILINX_VIVADO)/data/ip/xilinx/sd_fec_v1_1/common_tcl/{docsis,wifi,5g_demo}_decode.yml | sed 's/_decode:/:/g' | sed '/skip_enc_compatibility: 1/d' | sed '/encode: false/d' > srcs/all_codes.txt)
+
+ vivado -mode batch -source create_project.tcl -notrace
+
+clean:
+ rm -rf $(design_name) ./srcs/all_codes.txt ./bitstreams *.jou *.log NA
diff --git a/boards/ZCU111/create_project.tcl b/boards/ZCU111/create_project.tcl
new file mode 100644
index 0000000..bbf6fed
--- /dev/null
+++ b/boards/ZCU111/create_project.tcl
@@ -0,0 +1,39 @@
+set proj_name "sdfec_pynq"
+set exdes_name "sd_fec_gen_ex"
+set exdes_dir "${proj_name}/sdfec_exdes/${exdes_name}"
+
+create_project -force $proj_name ./$proj_name -part xczu28dr-ffvg1517-2L-e
+
+create_ip -name sd_fec -vendor xilinx.com -library ip -version 1.1 -module_name sd_fec_gen
+set_property -dict [list CONFIG.Standard "Custom" \
+ CONFIG.LDPC_Decode "true" \
+ CONFIG.LDPC_Decode_Code_Definition "[pwd]/srcs/all_codes.txt" \
+ CONFIG.DIN_Lanes 2 \
+ CONFIG.Include_PS_Example_Design "true" \
+ CONFIG.Example_Design_PS_Type "ZYNQ_UltraScale+_RFSoC" \
+ CONFIG.Include_Encoder "true" \
+ CONFIG.Build_SDK_Project "false"] [get_ips sd_fec_gen]
+
+open_example_project -in_process -force -dir ./${proj_name}/sdfec_exdes [get_ips sd_fec_gen]
+
+# change PS Master AXI width to 128 to conform with PYNQ requirements
+set_property -dict [list CONFIG.PSU__MAXIGP0__DATA_WIDTH {128}] [get_bd_cells zynq_ultra_ps]
+
+validate_bd_design
+save_bd_design
+
+add_files -fileset constrs_1 -norecurse ./srcs/zcu111_constraints.xdc
+
+set_property strategy Performance_ExplorePostRoutePhysOpt [get_runs impl_1]
+
+# change the number of parallel jobs to suit your CPU
+launch_runs impl_1 -to_step write_bitstream -jobs 16
+wait_on_run impl_1
+
+# get bitstream and hwh files
+if {![file exists ./bitstreams/]} {
+ file mkdir ./bitstreams/
+}
+
+file copy -force ./${exdes_dir}/${exdes_name}.runs/impl_1/ps_example_wrapper.bit ./bitstreams/${proj_name}.bit
+file copy -force ./${exdes_dir}/${exdes_name}.srcs/sources_1/bd/ps_example/hw_handoff/ps_example.hwh ./bitstreams/${proj_name}.hwh
\ No newline at end of file
diff --git a/boards/ZCU111/notebooks/SD-FEC_Evaluation.ipynb b/boards/ZCU111/notebooks/SD-FEC_Evaluation.ipynb
new file mode 100644
index 0000000..d6bf0eb
--- /dev/null
+++ b/boards/ZCU111/notebooks/SD-FEC_Evaluation.ipynb
@@ -0,0 +1,505 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# SD-FEC Evaluation\n",
+ "\n",
+ "> This notebook offers an environment to explore the Soft Decision Forward Error Correction (SD-FEC) IPs in RFSoC using the ZCU111 board. Based on work by Andy Dow (Xilinx, Edinburgh), it allows us to play with a set of configurable blocks including:\n",
+ ">\n",
+ "> 1. A data source with support for BPSK, QPSK, QAM-16, and QAM-64 modulation schemes\n",
+ ">\n",
+ "> 2. A pair of SD-FEC encoder/decoder blocks with configurable LDPC codes\n",
+ ">\n",
+ "> 3. An additive white Gaussian noise (AWGN) channel model with parameterisable noise power\n",
+ ">\n",
+ "> We'll quickly get some classic bit error rate curves from the hardware then investigate how these change with different modulation schemes and LDPC codes, and provide a look at some performance metrics. \n",
+ "\n",
+ "## Contents\n",
+ "\n",
+ " * [SD-FEC Evaluation](#SD-FEC-Evaluation)\n",
+ " + [SD-FEC refresher](#SD-FEC-refresher)\n",
+ " + [Loading the design](#Loading-the-design)\n",
+ " + [Getting a simple BER curve](#Getting-a-simple-BER-curve)\n",
+ " + [Comparing modulation schemes](#Comparing-modulation-schemes)\n",
+ " + [Comparing LDPC codes](#Comparing-LDPC-codes)\n",
+ " + [A note on performance](#A-note-on-performance)\n",
+ "\n",
+ "## SD-FEC refresher\n",
+ "\n",
+ "The ZCU111 has 8 SD-FEC integrated blocks that we can use to enable our RF systems to function under non-ideal, noisy environments.\n",
+ "\n",
+ "The SD-FEC blocks support Low Density Parity Check (LDPC) decoding and encoding, as well as the turbo code decoding used in LTE. We'll focus on LDPC codes for now since we can encode *and* decode these using a SD-FEC block. These codes are configurable from software, as we'll see [later](#Comparing-LDPC-codes).\n",
+ "\n",
+ "An LDPC code is a form of parity check matrix. Let's take a look at a graphical representation of what this means:\n",
+ "\n",
+ "![](assets/ldpc_fourney.svg)\n",
+ "\n",
+ "Here the row of `=` blocks represent the original data bits, the `+` blocks represent the parity bits, and the code dictates the number of blocks and their interconnects.\n",
+ "Note that most data bits contribute to multiple parity bits. Upon detecting error(s), multiple parity bits can be used to iteratively retrieve the original data. This iterative decode process can terminate early if we detect a valid codeword.\n",
+ "\n",
+ "For some further reading on LDPC codes, take a look at Bernhard M.J. Leiner's excellent [tutorial](http://www.bernh.net/media/download/papers/ldpc.pdf). You might want to save this reading for later though — some of our upcoming SD-FEC tests take a few minutes to execute!\n",
+ "\n",
+ "## Loading the design\n",
+ "\n",
+ "We'll first load the bitstream and our supporting Python library"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from rfsoc_sdfec import SdFecOverlay, ModType\n",
+ "ol = SdFecOverlay()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This design includes a complete datapath with a pair of SD-FEC encoding/decoding blocks, as pictured below.\n",
+ "![](assets/sd-fec-eval.svg)\n",
+ "\n",
+ "Let's have a look at what we can do with this design. Take a look at the most important method we expose, `run_block`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ol.run_block?"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To run a block of data through the signal path, we must supply configurations for the source, SD-FEC, and channel model. Let's take the time to define a set of default parameters."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "base_params = lambda : dict(\n",
+ " source_params = dict(\n",
+ " mod_type = ModType.BPSK,\n",
+ " zero_data = False,\n",
+ " num_blocks = 5000,\n",
+ " ),\n",
+ " fec_params = dict(\n",
+ " code_name = 'docsis_short',\n",
+ " max_iter = 8,\n",
+ " term_on_pass = True,\n",
+ " ),\n",
+ " channel_params = dict(\n",
+ " snr = 5.0,\n",
+ " skip_chan = False,\n",
+ " ),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Getting a simple BER curve\n",
+ "\n",
+ "First of all, let's try to run a single block of data through the signal path. We ask `base_params` for a set of parameters, and pass it to the overlay."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ol.run_block(**base_params())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To be clear, we've just used two of the SD-FEC blocks present in the RFSoC! We've asked the overlay to push 5000 blocks of random data through an SD-FEC encoder, through a noisy channel using BPSK modulation, and back through an SD-FEC decoder. The size of each block depends on the LDPC code selected but in this case, we've just sent $\\approx$40 Mb through the data path.\n",
+ "\n",
+ "There's a lot of statistics we can potentially look at. These include:\n",
+ " * Bit Error Rate (BER) and Frame Error Rate (FER) of the final signal *after* SD-FEC decoding\n",
+ " \n",
+ " * BER and FER of the raw received signal *before* SD-FEC decoding\n",
+ " \n",
+ " * Throughput of the SD-FEC encoding and decoding in Gb/s\n",
+ " \n",
+ " * Average iterations needed for SD-FEC decoding (remember, the decoder can exit early)\n",
+ " \n",
+ "Let's now run a set of tests, sweeping the SNR of the channel from low (noisy channel) to high (clean channel), and see how the bit error rate is affected."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np # Math functions\n",
+ "import pandas as pd # DataFrame for storing results\n",
+ "from tqdm import tqdm_notebook # Progress bars\n",
+ "\n",
+ "# Define a progress bar helper\n",
+ "bar = lambda itr, desc: tqdm_notebook(\n",
+ " itr,\n",
+ " desc=desc,\n",
+ " bar_format='{n}/|/{percentage:3.0f}%'\n",
+ ")\n",
+ "\n",
+ "params = base_params()\n",
+ "results = pd.DataFrame()\n",
+ "\n",
+ "for snr in bar(np.arange(3, 5.5, step=0.25), 'SNR Loop'):\n",
+ " params['channel_params']['snr'] = snr\n",
+ " results = results.append(ol.run_block(**params), ignore_index=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can inspect the results as a table (with the `pandas` library). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's plot this using `plotly`, hopefully getting that classic BER curve!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import plotly_express as px\n",
+ "\n",
+ "px.line(\n",
+ " results, x='snr', y='ber', # Data config\n",
+ " labels = {'snr': 'SNR (dB)', 'ber': 'Bit error probability'}, # Label config\n",
+ " template ='log_plot', height=400 # Appearance\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Bit Error Rate plot shows that as the SNR increases (our signal gets less noisy) it becomes less likely that a bit is corrupted, so the bit error rate probability decreases\n",
+ "\n",
+ "## Comparing modulation schemes\n",
+ "\n",
+ "The next step is to run the BER vs SNR test for different modulation schemes and compare the results. Here we send over 200 different 40 Mb blocks with IP configuration and stats recovery in between.\n",
+ "Because this test will take just over 3 minutes to run, now would be a good time to take a short break.You could also read a little more about the [LDPC codes](http://www.bernh.net/media/download/papers/ldpc.pdf) we're using here... and at least we're not waiting on a software implementation of the same codes!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "params = base_params()\n",
+ "results = pd.DataFrame()\n",
+ "mod_schemes = [ModType.BPSK, ModType.QPSK, ModType.QAM16, ModType.QAM64]\n",
+ "\n",
+ "for mod_type in bar(mod_schemes, f'Modulation type loop'):\n",
+ " params['source_params']['mod_type'] = mod_type\n",
+ " for snr in bar(np.arange(3, 16, step=0.25), f'{mod_type.name} SNR Loop'):\n",
+ " params['channel_params']['snr'] = snr\n",
+ " results = results.append(ol.run_block(**params), ignore_index=True)\n",
+ "\n",
+ "results.to_csv('assets/ber_data.csv', mode='w', index=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's plot the results as before, but giving each modulation scheme a line with a unique colour. Note that we're only plotting BER test results that are statistically significant(ish) — i.e. we ignore runs with less than a minimum number of bits in error."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "px.line(results.query('_bit_errors>5'), x='snr', y='ber', color='mod_type',\n",
+ " labels = {'snr': 'SNR (dB)', 'ber': 'Bit error probability'},\n",
+ " category_orders={\"mod_type\": ['BPSK', 'QPSK', 'QAM16', 'QAM64']},\n",
+ " range_y = (-4.5, -0.4), template='log_plot', height=400\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that the legend in this plot is interactive! You can use it to select which modulation schemes are visible (single click an entry to hide it; double click to hide all others). We can see from the graph that QAM-64 needs the highest SNR to meet a fixed/acceptable BER, followed by QAM-16 and finally QPSK & BPSK.\n",
+ "\n",
+ "This matches our intuition: in general, the more complex modulation schemes are used to transmit more information in a given bandwidth. Consequently, they are more susceptible to errors in the presence of noise.\n",
+ "\n",
+ "Let's continue by looking at some of the other statistics available to us. We'll plot four subplots showing different stats vs SNR."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from plotly import tools\n",
+ "import plotly.offline as po\n",
+ "\n",
+ "sub_plot = lambda results, y_field: px.line(\n",
+ " results, x='snr', y=y_field, color='mod_type',\n",
+ " category_orders={\"mod_type\": ['BPSK', 'QPSK', 'QAM16', 'QAM64']},\n",
+ " template='log_plot'\n",
+ ")\n",
+ "\n",
+ "traces = [('Bit Error Rate' , 'Error probability' , 'ber' , 'log' , '_bit_errors>5', 1, 1, False),\n",
+ " ('Average Iterations', 'Iterations' , 'dec_avg_iters' , 'linear' , None , 1, 2, False),\n",
+ " ('Frame Error Rate' , 'Error Probability' , 'fer' , 'log' , '_bit_errors>5', 2, 1, False),\n",
+ " ('Decoder Throughput', 'Decoder Throughput (Gb/s)', 'dec_throughput', 'linear' , None , 2, 2, True )]\n",
+ "# Plot title Y-axis title Y data field Y-axis type Query filter Plot# Show legend? \n",
+ "\n",
+ "def matrix_plot(sub_plot, traces):\n",
+ " fig = tools.make_subplots(rows=2, cols=2, subplot_titles=list(map(lambda s:s[0], traces)), print_grid=False)\n",
+ "\n",
+ " for _, y_title, y_field, y_scale, query, index_v, index_h, legend in traces:\n",
+ " trace_dataset = results if query == None else results.query(query)\n",
+ " for trace in sub_plot(trace_dataset, y_field).data:\n",
+ " trace.showlegend = legend\n",
+ " subplot_name = str(index_h+2*(index_v-1))\n",
+ " x_axis = getattr(fig.layout, 'xaxis'+subplot_name)\n",
+ " x_axis.title = 'SNR (dB)'\n",
+ " y_axis = getattr(fig.layout, 'yaxis'+subplot_name)\n",
+ " y_axis.type=y_scale\n",
+ " y_axis.exponentformat = 'power' if y_scale == 'log' else 'none'\n",
+ " y_axis.title=y_title\n",
+ " fig.append_trace(trace, index_v, index_h)\n",
+ " \n",
+ " fig.layout.template = 'log_plot'\n",
+ " fig.layout.height = 500\n",
+ " po.iplot(fig)\n",
+ "\n",
+ "matrix_plot(sub_plot, traces)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There are three patterns worth noting here:\n",
+ "\n",
+ " 1. The bit error rate has a direct effect on the frame error rate (this at least shows that the errors are evenly distrbuted between frames)\n",
+ " \n",
+ " 2. The average number of iterations starts at our maximum but drops as SNR increases\n",
+ " \n",
+ " 3. The decoder throughput *in this design* depends on a couple of factors, including:\n",
+ " * The average number of iterations — also influenced by the SNR.\n",
+ " \n",
+ " * The modulation scheme (QAM-64 transmits 6 bits of information with each symbol whereas BPSK only transmits 1 bit). Our channel model takes in symbols at a fixed rate, so the QPSK and BPSK curves are actually limited by the channel model and not by the FEC decoder. The QAM-16 curve, however, *is* limited by the FEC decoder block.\n",
+ "\n",
+ "The SNR of our signal not only impacts the BER we can achieve, but also the maximum throughput of the system. With the risk of being a bit too gimmicky, let's plot this relationship as a 3D scatter plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "px.scatter_3d(results, x='snr', y='ber', z='dec_throughput', color='mod_type',\n",
+ " labels = {'snr': 'SNR (dB)', 'ber': 'Bit error probability', 'dec_throughput': 'Throughput (Gb/s)'},\n",
+ " category_orders={\"mod_type\": ['BPSK', 'QPSK', 'QAM16', 'QAM64']},\n",
+ " template='log_plot', height=500)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Comparing LDPC codes\n",
+ "\n",
+ "One final parametric sweep we might want to look at is for testing different LDPC codes. We expect to see some trade-offs between throughput and BER performance.\n",
+ "\n",
+ "Let's have a quick look at how to configure these LDPC codes with the PYNQ SdFec driver. This driver is a Python wrapper around the [existing baremetal driver](https://github.com/Xilinx/embeddedsw/tree/release-2018.3/XilinxProcessorIPLib/drivers/sd_fec), with a few extra conveniences. Because of the way we parse bitstream metadata, the SdFec driver can extract all LDPC code parameters that have been preloaded in Vivado. We can now setup different codes by name rather than large C structures.\n",
+ "\n",
+ "We can ask the SdFec driver for a full list of LDPC codes preloaded in this design. There are many codes so let's only look at the first 5 or so:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ol.sd_fec_dec.available_ldpc_params()[:5]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's put a new code at the start of the SD-FEC decoder's look-up tables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ol.sd_fec_dec.add_ldpc_params(0, 0, 0, 0, 'wifi802_11_cr1_2_1296')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can iterate a test over a subset of the available codes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "params = base_params()\n",
+ "params['fec_params']['max_iter'] = 16\n",
+ "params['source_params']['mod_type'] = ModType.QAM16\n",
+ "results = pd.DataFrame()\n",
+ "ldpc_codes = ['docsis_short', 'wifi802_11_cr2_3_1296', '5g_graph1_set1_l46_p32']\n",
+ "\n",
+ "# See ol.sd_fec_dec.available_ldpc_params() for a full list of included LDPC codes \n",
+ "for ldpc_code in bar(ldpc_codes, 'Code loop'): \n",
+ " params['fec_params']['code_name'] = ldpc_code\n",
+ " for snr in bar(np.arange(0, 10, step=0.5), f'{str(ldpc_code)} SNR loop'):\n",
+ " params['channel_params']['snr'] = snr\n",
+ " results = results.append(ol.run_block(**params), ignore_index=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's generate some plots with a unique colour for each LDPC code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sub_plot = lambda results, y_field: px.line(\n",
+ " results, x='snr', y=y_field, color='code_name',\n",
+ " template='log_plot', height = 500\n",
+ ")\n",
+ "\n",
+ "matrix_plot(sub_plot, traces)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The set of codes selected have dramatic differences in performance! Note in particular the difference between throughput and bit error rate — a classic balancing act. The 5G code has a substantially lower throughput at these SNR values but is the clear winner in terms of error correction. \n",
+ "\n",
+ "## A note on performance\n",
+ "\n",
+ "It should be noted that the encoder typically has a much higher throughput than the decoder. This design includes a FIFO that allows performs some buffering of encoded data but if this becomes full the encoder IP is throttled. Therefore, to measure the throughput of the encoder the number of codeblocks run through the system should be limited such the encoded data FIFO does not fill. Generally limiting the number of blocks to 100 will ensure the encoded data FIFO does not fill.\n",
+ "\n",
+ "Also note that the channel model throughput is limited by the modulation type selected. The maximum throughput supported is using QAM-64 modulation (6 bits per symbol). With the channel model's 4 symbol wide input, this gives a maximum throughput of:\n",
+ "$$ 4 \\times 6\\ bits \\times 300\\ MHz = 7.2\\ Gb/s $$\n",
+ "\n",
+ "Let's run a small test and inspect the encoder and decoder throughputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "params = base_params()\n",
+ "params['source_params']['mod_type'] = ModType.QAM64\n",
+ "params['source_params']['num_blocks'] = 100\n",
+ "params['channel_params']['snr'] = 16.0\n",
+ "\n",
+ "results = ol.run_block(**params)\n",
+ "enc_tp = results['enc_throughput']\n",
+ "dec_tp = results['dec_throughput']\n",
+ "print(f'Encoder throughput: {enc_tp} Gb/s \\t Decoder throughput: {dec_tp} Gb/s')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It's also good to note that these throughput stats for the 'docsis_short' code agree quite closely with the [official documentation](https://www.xilinx.com/support/documentation/ip_documentation/pl/sd-fec-throughput-latency.html#toc8).\n",
+ "\n",
+ "## Summary\n",
+ "\n",
+ "In this notebook we've:\n",
+ "\n",
+ " * Used PYNQ to interact with the SD-FEC blocks present on the RFSoC\n",
+ " * Looked at profiling the performance of the SD-FEC encoder and decoder blocks\n",
+ " * Taken an example SD-FEC design and demonstrated the benefits Python productivity:\n",
+ " + Performed parametric sweeps of SNR, modulation scheme, and LDPC codes...\n",
+ " + with interactive visualisations of the results...\n",
+ " + helping to learn about the relationship between the parameters and different performance metrics\n",
+ "\n",
+ "This design is open source and available [here](https://github.com/Xilinx/SDFEC-PYNQ)."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/boards/ZCU111/notebooks/assets/ldpc_fourney.svg b/boards/ZCU111/notebooks/assets/ldpc_fourney.svg
new file mode 100644
index 0000000..3716286
--- /dev/null
+++ b/boards/ZCU111/notebooks/assets/ldpc_fourney.svg
@@ -0,0 +1,2803 @@
+
+
+
+
\ No newline at end of file
diff --git a/boards/ZCU111/notebooks/assets/notebook_preview.png b/boards/ZCU111/notebooks/assets/notebook_preview.png
new file mode 100644
index 0000000..2bc6bde
Binary files /dev/null and b/boards/ZCU111/notebooks/assets/notebook_preview.png differ
diff --git a/boards/ZCU111/notebooks/assets/sd-fec-eval.svg b/boards/ZCU111/notebooks/assets/sd-fec-eval.svg
new file mode 100644
index 0000000..fe63164
--- /dev/null
+++ b/boards/ZCU111/notebooks/assets/sd-fec-eval.svg
@@ -0,0 +1,2598 @@
+
+
+
+
\ No newline at end of file
diff --git a/boards/ZCU111/srcs/zcu111_constraints.xdc b/boards/ZCU111/srcs/zcu111_constraints.xdc
new file mode 100644
index 0000000..672884e
--- /dev/null
+++ b/boards/ZCU111/srcs/zcu111_constraints.xdc
@@ -0,0 +1,23 @@
+# Clocks
+# o 100MHz board clock
+set_property IOSTANDARD LVDS [get_ports sys_diff_clock_clk_n]
+set_property IOSTANDARD LVDS [get_ports sys_diff_clock_clk_p]
+set_property PACKAGE_PIN AN15 [get_ports sys_diff_clock_clk_n]
+set_property PACKAGE_PIN AM15 [get_ports sys_diff_clock_clk_p]
+
+# Inputs
+set_property IOSTANDARD LVCMOS18 [get_ports reset]
+set_property PACKAGE_PIN AW3 [get_ports reset]
+
+# Status
+set_property IOSTANDARD LVCMOS18 [get_ports {led_bits_tri_o[0]}]
+set_property PACKAGE_PIN AR13 [get_ports {led_bits_tri_o[0]}]
+set_property IOSTANDARD LVCMOS18 [get_ports {led_bits_tri_o[1]}]
+set_property PACKAGE_PIN AP13 [get_ports {led_bits_tri_o[1]}]
+set_property IOSTANDARD LVCMOS18 [get_ports {led_bits_tri_o[2]}]
+set_property PACKAGE_PIN AR16 [get_ports {led_bits_tri_o[2]}]
+set_property IOSTANDARD LVCMOS18 [get_ports {led_bits_tri_o[3]}]
+set_property PACKAGE_PIN AP16 [get_ports {led_bits_tri_o[3]}]
+set_property IOSTANDARD LVCMOS18 [get_ports {led_bits_tri_o[4]}]
+set_property PACKAGE_PIN AP15 [get_ports {led_bits_tri_o[4]}]
+
diff --git a/rfsoc_sdfec/__init__.py b/rfsoc_sdfec/__init__.py
new file mode 100644
index 0000000..6dfda7a
--- /dev/null
+++ b/rfsoc_sdfec/__init__.py
@@ -0,0 +1,354 @@
+# Copyright (c) 2019, Xilinx, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import xsdfec
+
+import signal
+import plotly.io as pio
+import os
+from typing import Tuple
+from enum import Enum
+from pynq import Overlay, MMIO
+
+def install_notebooks(notebook_dir=None):
+ """Copy SDFEC notebooks to the filesystem
+
+ notebook_dir: str
+ An optional destination filepath. If None, assume PYNQ's default
+ jupyter_notebooks folder.
+ """
+ import shutil
+ from distutils.dir_util import copy_tree
+
+    if notebook_dir is None:
+ notebook_dir = os.environ['PYNQ_JUPYTER_NOTEBOOKS']
+ if not os.path.isdir(notebook_dir):
+ raise ValueError(
+ f'Directory {notebook_dir} does not exist. Please supply a `notebook_dir` argument.')
+
+ src_nb_dir = os.path.join(os.path.dirname(__file__), 'notebooks')
+ dst_nb_dir = os.path.join(notebook_dir, 'rfsoc_sdfec')
+ if os.path.exists(dst_nb_dir):
+ shutil.rmtree(dst_nb_dir)
+ copy_tree(src_nb_dir, dst_nb_dir)
+
+
+class ModType(Enum):
+ """Enum for modulation type"""
+ BPSK = 0
+ QPSK = 1
+ QAM16 = 2
+ QAM64 = 3
+
+
+class _SuppressedSIGINT(object):
+ def __enter__(self):
+ self._caught_sig = None
+ self._orig_handler = signal.signal(signal.SIGINT, self.handler)
+
+ def handler(self, sig, frame):
+ self._caught_sig = (sig, frame)
+
+ def __exit__(self, type, value, traceback):
+ signal.signal(signal.SIGINT, self._orig_handler)
+        # Re-deliver any SIGINT caught while the critical section was running
+        if self._caught_sig is not None:
+            self._orig_handler(*self._caught_sig)
+
+
+class SdFecOverlay(Overlay):
+ """Overlay for SD FEC evaluation demo"""
+
+ def __init__(self, bitfile_name=None, dark_theme=False, **kwargs):
+ """Construct a new SdFecOverlay
+
+ bitfile_name: str
+ Optional bitstream filename. If None, we use the bitstream supplied with this package.
+ """
+
+ # Generate default bitfile name
+ if bitfile_name is None:
+ this_dir = os.path.dirname(__file__)
+ bitfile_name = os.path.join(this_dir, 'bitstreams', 'sdfec_pynq.bit')
+
+ # Build a default plotly template for our log plots
+ log_template = pio.templates['plotly_dark' if dark_theme else 'plotly_white']
+ log_template.layout.yaxis.exponentformat = 'power'
+ log_template.layout.yaxis.type = 'log'
+ log_template.layout.scene.yaxis.exponentformat = 'power'
+ log_template.layout.scene.yaxis.type = 'log'
+ log_template.layout.width = 1000
+ log_template.layout.height = 400
+ log_template.layout.autosize = False
+ log_template.layout.legend.x = 1.1
+ pio.templates['log_plot'] = log_template
+
+ # Create Overlay
+ super().__init__(bitfile_name, **kwargs)
+
+
+ def _collect_monitor_stats(self, mon, sys_clk=300e6):
+ first = int(mon.register_map.first_V)
+ last = int(mon.register_map.last_V)
+ stalled = int(mon.register_map.stalled_V)
+ iters = int(self.stats.register_map.iter_cnt_V)
+ blocks = int(self.stats.register_map.block_cnt_V)
+ k = int(self.stats.register_map.k_V)
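+        # Throughput in Gb/s: payload bits over (blocks-1) blocks, divided by
+        # the clock cycles between the first and last block markers, scaled by
+        # the system clock frequency and 2**30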
+ return dict(
+ throughput = ((blocks-1) * k) / (last - first) * sys_clk / (2**30),
+ avg_iter = iters / blocks,
+ stalled = stalled
+ )
+
+
+ def run_block(self, source_params, fec_params, channel_params):
+ """Run the SD FEC test given source, FEC, and channel parameters.
+
+ See the `default_params` method for a reasonable set of default arguments.
+
+ source_params : dict
+ Configuration for number of blocks (num_blocks), zeroization (zero_data), and modulation type (mod_type).
+
+ fec_params : dict
+ Configuration for LDPC codes (code_name), max iterations (max_iter), and early termination (term_on_pass).
+
+ channel_params : dict
+            Configuration for SNR (snr) and channel model bypass (skip_chan).
+
+ return : dict
+ A dict populated with test stats including BER, FER, and throughput.
+ """
+
+ # Check input parameter ranges
+ assert 0 <= channel_params['snr'] <= 16, "Argument 'channel_params['snr']' out of range"
+ assert 0 <= source_params['num_blocks'], "Argument 'source_params['num_blocks']' out of range"
+ assert 0 <= fec_params['max_iter'] , "Argument 'fec_params['max_iter']' out of range"
+
+ # Starting critical section, so delay any keyboard interrupts
+ with _SuppressedSIGINT():
+
+ # Setup SDFEC blocks
+ self.sd_fec_enc.CORE_ORDER = 0
+ self.sd_fec_dec.CORE_ORDER = 0
+ self.data_source.register_map.fec_type_V = 0 # LDPC = 0
+
+ k = self.sd_fec_dec._code_params.ldpc[fec_params['code_name']]['k']
+ n = self.sd_fec_dec._code_params.ldpc[fec_params['code_name']]['n']
+
+ self.sd_fec_enc.CORE_AXIS_ENABLE = 0
+ self.sd_fec_dec.CORE_AXIS_ENABLE = 0
+
+ self.sd_fec_enc.add_ldpc_params(0,0,0,0, fec_params['code_name'])
+ self.sd_fec_dec.add_ldpc_params(0,0,0,0, fec_params['code_name'])
+
+ self.sd_fec_enc.CORE_AXIS_ENABLE = 63
+ self.sd_fec_dec.CORE_AXIS_ENABLE = 63
+
+ # Setup data source
+ self.data_source.register_map.zero_data_V = source_params['zero_data']
+ self.data_source.register_map.mod_type_V = source_params['mod_type'].value
+ self.data_source.register_map.skip_chan_V = channel_params['skip_chan']
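+            # Fixed-point channel parameters: SNR in dB scaled by 2**11, and the
+            # linear term 10^(SNR/10) (1/sigma^2 for unit signal power) scaled by 2**10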
+ self.data_source.register_map.snr_V = int(channel_params['snr']*2048)
+ self.data_source.register_map.inv_sigma_sq_V = int(pow(10.0,channel_params['snr']/10)*1024)
+
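+            # Pack the SD-FEC control words (split into two 32-bit register writes):
+            # both set bit 14; the decoder word also carries term_on_pass (bit 16)
+            # and max_iter (from bit 18)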
+ word1, word2 = self._to_64bit_tuple(1<<14)
+ self.data_source.register_map.enc_ctrl_word_V_1 = word1
+ self.data_source.register_map.enc_ctrl_word_V_2 = word2
+ word1, word2 = self._to_64bit_tuple((1<<14) + (fec_params['term_on_pass'] << 16) + (fec_params['max_iter'] << 18))
+ self.data_source.register_map.dec_ctrl_word_V_1 = word1
+ self.data_source.register_map.dec_ctrl_word_V_2 = word2
+
+ self.data_source.register_map.num_blocks_V = source_params['num_blocks']
+ self.data_source.register_map.source_words_V = int((k+127)/128)
+ self.data_source.register_map.source_keep_V = 0xFFFFFFFF & self._calc_tkeep(k, 128)
+ word1, word2 = self._to_64bit_tuple(self._calc_tkeep(n, 96))
+ self.data_source.register_map.enc_keep_V_1 = word1
+ self.data_source.register_map.enc_keep_V_2 = word2
+ word1, word2 = self._to_64bit_tuple(self._calc_tkeep(k, 128))
+ self.data_source.register_map.dec_keep_V_1 = word1
+ self.data_source.register_map.dec_keep_V_2 = word2
+
+ self.data_source.register_map.chan_symbls_V = self._get_chan_symbols(source_params['mod_type'], n)
+ self.data_source.register_map.chan_rem_V = self._get_chan_rem(source_params['mod_type'], n)
+
+ # Setup stats block
+ self.stats.register_map.num_blocks_V = source_params['num_blocks']
+ self.stats.register_map.k_V = k
+ self.stats.register_map.n_V = n
+ self.stats.register_map.mask_V_1 = self._calc_stats_mask(k)[0]
+ self.stats.register_map.mask_V_2 = self._calc_stats_mask(k)[1]
+ self.stats.register_map.mask_V_3 = self._calc_stats_mask(k)[2]
+ self.stats.register_map.mask_V_4 = self._calc_stats_mask(k)[3]
+ self.stats.register_map.src_inc_parity_V = 0
+
+ # Setup stream monitors
+ self.enc_ip_mon.register_map.num_blocks_V = source_params['num_blocks']
+ self.enc_op_mon.register_map.num_blocks_V = source_params['num_blocks']
+ self.dec_ip_mon.register_map.num_blocks_V = source_params['num_blocks']
+ self.dec_op_mon.register_map.num_blocks_V = source_params['num_blocks']
+
+ # Start all blocks
+ self.enc_ip_mon.register_map.CTRL.AP_START = 1
+ self.enc_op_mon.register_map.CTRL.AP_START = 1
+ self.dec_ip_mon.register_map.CTRL.AP_START = 1
+ self.dec_op_mon.register_map.CTRL.AP_START = 1
+ self.stats.register_map.CTRL.AP_START = 1
+ self.data_source.register_map.CTRL.AP_START = 1
+
+ # Wait for end of test
+ while (self.stats.register_map.CTRL.AP_IDLE == 0):
+ pass
+
+ # Recover stats
+ block_cnt = int(self.stats.register_map.block_cnt_V)
+ bit_errs = int(self.stats.register_map.cor_berr_V)
+ frame_errs = int(self.stats.register_map.cor_blerr_V)
+ raw_bit_errs = int(self.stats.register_map.raw_berr_V)
+ raw_frame_errs = int(self.stats.register_map.raw_blerr_V)
+ k = int(self.stats.register_map.k_V)
+
+ enc_stats = self._collect_monitor_stats(self.enc_op_mon)
+ dec_stats = self._collect_monitor_stats(self.dec_op_mon)
+
+ return dict(
+ snr = channel_params['snr'],
+ mod_type = source_params['mod_type'].name,
+ code_name = fec_params['code_name'],
+ ber = bit_errs / (block_cnt * k),
+ fer = frame_errs / block_cnt,
+ raw_ber = raw_bit_errs / (block_cnt * k),
+ raw_fer = raw_frame_errs / block_cnt,
+ enc_throughput = enc_stats['throughput'],
+ enc_avg_iters = enc_stats['avg_iter'],
+ dec_throughput = dec_stats['throughput'],
+ dec_avg_iters = dec_stats['avg_iter'],
+ _bit_errors = bit_errs,
+ )
+
+
+ @staticmethod
+    def default_params():
+        """Return a tuple of (source_params, fec_params, channel_params) dicts with sensible defaults."""
+ source_params = dict(
+ mod_type = ModType.BPSK,
+ zero_data = False,
+ num_blocks = 10000,
+ )
+
+ fec_params = dict(
+ code_name = 'docsis_short',
+ max_iter = 8,
+ term_on_pass = False,
+ )
+
+ channel_params = dict(
+ snr = 5.0,
+ skip_chan = False,
+ )
+ return (source_params, fec_params, channel_params)
+
+ @staticmethod
+ def fold_stat_list(stats: list) -> dict:
+ """Fold/reduce a list of stat dicts into a single stat dict.
+
+ stats : list of dicts
+ A list of stat dicts. Each element is likely obtained from a `run_block` call
+
+ return : dict
+ A single dict with the combined stats
+ """
+ sum_stat = lambda key: sum(map(lambda s:s[key], stats))
+ avg_stat = lambda key: sum_stat(key) / len(stats)
+ return dict(
+ snr = stats[0]['snr'],
+ mod_type = stats[0]['mod_type'],
+ code_name = stats[0]['code_name'],
+ ber = avg_stat('ber'),
+ fer = avg_stat('fer'),
+ raw_ber = avg_stat('raw_ber'),
+ raw_fer = avg_stat('raw_fer'),
+ enc_throughput = avg_stat('enc_throughput'),
+ enc_avg_iters = avg_stat('enc_avg_iters'),
+ dec_throughput = avg_stat('dec_throughput'),
+ dec_avg_iters = avg_stat('dec_avg_iters'),
+ _bit_errors = sum_stat('_bit_errors'),
+ )
+
+ @staticmethod
+ def _calc_stats_mask(k: int) -> Tuple[int, int, int, int]:
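+        # Bit mask covering the k % 128 valid payload bits in the final 128-bit
+        # word of a block, returned as four 32-bit register fields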
+ bits = k % 128
+
+ # Short-circuit on zero bits
+ if bits == 0:
+ return (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF)
+
+ masks = [0,0,0,0]
+ for i in range(4):
+ if bits >= 32:
+ masks[i] = 0xFFFFFFFF
+ bits -= 32
+ else:
+ masks[i] = 0x7FFFFFFF >> (31-bits)
+ bits = 0
+ return (masks[0], masks[1], masks[2], masks[3])
+
+
+ @staticmethod
+ def _calc_tkeep(pkt_size: int, per_trans: int, is_bits: bool = True) -> int:
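+        # TKEEP-style mask for the last transfer of a packet: pkt_size modulo
+        # per_trans gives the residue (converted to bytes when is_bits), with a
+        # full mask returned when the packet divides evenly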
+ rem = pkt_size % per_trans
+ if is_bits:
+ rem = int((rem + 7) / 8)
+ if rem == 0:
+ return 0xFFFFFFFF
+ else:
+ return (1 << rem) - 1
+
+
+ @staticmethod
+ def _to_64bit_tuple(val: int) -> Tuple[int, int]:
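+        # Split a value into (lower, upper) 32-bit words for *_V_1/*_V_2 register pairs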
+ return (val & 0xFFFFFFFF, (val >> 32) & 0xFFFFFFFF)
+
+
+ @staticmethod
+ def _get_mod_n(mod_type: ModType) -> int:
+ return {
+ 'BPSK': 4,
+ 'QPSK': 8,
+ 'QAM16': 12,
+ 'QAM64': 24,
+ }[mod_type.name]
+
+
+ @staticmethod
+ def _get_chan_symbols(mod_type: ModType, n: int) -> int:
+ x = SdFecOverlay._get_mod_n(mod_type)
+ return int((n+x-1) / x)
+
+ @staticmethod
+ def _get_chan_rem(mod_type: ModType, n: int) -> int:
+ return int(n % SdFecOverlay._get_mod_n(mod_type))
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..e78cfbc
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2019, Xilinx, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+import subprocess
+import shutil
+from distutils.dir_util import copy_tree
+
+from setuptools import find_packages, setup
+
+# global variables
+board = os.environ['BOARD']
+repo_board_folder = f'boards/{board}/'
+hw_data_files = []
+
+
+def check_env():
+ """Check if we're running on PYNQ and if the board is supported."""
+ if not os.path.isdir(repo_board_folder):
+ raise ValueError("Board {} is not supported.".format(board))
+
+
+def copy_board_files(subdir):
+ """Copy files from board directory to package"""
+ src_dir = os.path.join(repo_board_folder, subdir)
+ dst_dir = os.path.join('rfsoc_sdfec', subdir)
+ copy_tree(src_dir, dst_dir)
+ for new_file_dir, _, new_files in os.walk(dst_dir):
+ hw_data_files.extend(
+ [os.path.join("..", new_file_dir, f) for f in new_files]
+ )
+
+
+check_env()
+make_command = ["make", "-C", repo_board_folder]
+if subprocess.call(make_command) != 0:
+ sys.exit(-1)
+copy_board_files('notebooks')
+copy_board_files('bitstreams')
+
+
+setup(
+ name="rfsoc_sdfec",
+ version='1.0',
+ install_requires=[
+ 'pynq>=2.4',
+ 'plotly>=3.8.1',
+ 'plotly-express>=0.1.7',
+ 'tqdm>=4.31.1'
+ ],
+ url='https://github.com/Xilinx/SDFEC-PYNQ',
+ license='BSD 3-Clause License',
+ author="Craig Ramsay",
+ author_email="cramsay@xilinx.com",
+ packages=find_packages(),
+ package_data={'': hw_data_files},
+ description="PYNQ example of using the RFSoC's Soft Decision Forward Error Correction (SD-FEC) IP")